// SPDX-License-Identifier: GPL-2.0-only
/*
* Implementation of the host-to-chip commands (aka request/confirmation) of the
* hardware API.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/etherdevice.h>
#include "hif_tx.h"
#include "wfx.h"
#include "bh.h"
#include "hwio.h"
#include "debug.h"
#include "sta.h"
void wfx_init_hif_cmd(struct wfx_hif_cmd *hif_cmd)
{
init_completion(&hif_cmd->ready);
init_completion(&hif_cmd->done);
mutex_init(&hif_cmd->lock);
}
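/* Fill the common HIF header. Note that the length field includes the 4-byte header itself in
* addition to the body. if_id == -1 is the convention for "no specific vif"; it is mapped to 2,
* which the firmware appears to treat as the generic, non-vif interface.
*/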
static void wfx_fill_header(struct wfx_hif_msg *hif, int if_id, unsigned int cmd, size_t size)
{
if (if_id == -1)
if_id = 2;
WARN(cmd > 0x3f, "invalid hardware command %#.2x", cmd);
WARN(size > 0xFFF, "requested buffer is too large: %zu bytes", size);
WARN(if_id > 0x3, "invalid interface ID %d", if_id);
hif->len = cpu_to_le16(size + 4);
hif->id = cmd;
hif->interface = if_id;
}
static void *wfx_alloc_hif(size_t body_len, struct wfx_hif_msg **hif)
{
*hif = kzalloc(sizeof(struct wfx_hif_msg) + body_len, GFP_KERNEL);
if (*hif)
return (*hif)->body;
else
return NULL;
}
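/* Send a command to the chip and, unless no_reply is set, wait for its confirmation. The buffer
* is handed over to the bottom-half workqueue through hif_cmd.ready, and hif_cmd.done is
* presumably completed by the receive path once the confirmation arrives. The two-stage 1s + 3s
* timeout below gives a slow chip a second chance before it is declared frozen.
*/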
int wfx_cmd_send(struct wfx_dev *wdev, struct wfx_hif_msg *request,
void *reply, size_t reply_len, bool no_reply)
{
const char *mib_name = "";
const char *mib_sep = "";
int cmd = request->id;
int vif = request->interface;
int ret;
/* Do not wait for any reply if chip is frozen */
if (wdev->chip_frozen)
return -ETIMEDOUT;
mutex_lock(&wdev->hif_cmd.lock);
WARN(wdev->hif_cmd.buf_send, "data locking error");
/* Note: the call to complete() below has an implicit memory barrier that hopefully protects
* buf_send
*/
wdev->hif_cmd.buf_send = request;
wdev->hif_cmd.buf_recv = reply;
wdev->hif_cmd.len_recv = reply_len;
complete(&wdev->hif_cmd.ready);
wfx_bh_request_tx(wdev);
if (no_reply) {
/* The chip won't reply. Ensure the workqueue has sent the buffer before continuing. */
flush_workqueue(wdev->bh_wq);
ret = 0;
goto end;
}
if (wdev->poll_irq)
wfx_bh_poll_irq(wdev);
ret = wait_for_completion_timeout(&wdev->hif_cmd.done, 1 * HZ);
if (!ret) {
dev_err(wdev->dev, "chip is abnormally long to answer\n");
reinit_completion(&wdev->hif_cmd.ready);
ret = wait_for_completion_timeout(&wdev->hif_cmd.done, 3 * HZ);
}
if (!ret) {
dev_err(wdev->dev, "chip did not answer\n");
wfx_pending_dump_old_frames(wdev, 3000);
wdev->chip_frozen = true;
reinit_completion(&wdev->hif_cmd.done);
ret = -ETIMEDOUT;
} else {
ret = wdev->hif_cmd.ret;
}
end:
wdev->hif_cmd.buf_send = NULL;
mutex_unlock(&wdev->hif_cmd.lock);
if (ret &&
(cmd == HIF_REQ_ID_READ_MIB || cmd == HIF_REQ_ID_WRITE_MIB)) {
mib_name = wfx_get_mib_name(((u16 *)request)[2]);
mib_sep = "/";
}
if (ret < 0)
dev_err(wdev->dev, "hardware request %s%s%s (%#.2x) on vif %d returned error %d\n",
wfx_get_hif_name(cmd), mib_sep, mib_name, cmd, vif, ret);
if (ret > 0)
dev_warn(wdev->dev, "hardware request %s%s%s (%#.2x) on vif %d returned status %d\n",
wfx_get_hif_name(cmd), mib_sep, mib_name, cmd, vif, ret);
return ret;
}
/* This function is special. After HIF_REQ_ID_SHUT_DOWN, the chip won't reply to any request
* anymore. Obviously, only call this function during device unregistration.
*/
int wfx_hif_shutdown(struct wfx_dev *wdev)
{
int ret;
struct wfx_hif_msg *hif;
wfx_alloc_hif(0, &hif);
if (!hif)
return -ENOMEM;
wfx_fill_header(hif, -1, HIF_REQ_ID_SHUT_DOWN, 0);
ret = wfx_cmd_send(wdev, hif, NULL, 0, true);
if (wdev->pdata.gpio_wakeup)
gpiod_set_value(wdev->pdata.gpio_wakeup, 0);
else
wfx_control_reg_write(wdev, 0);
kfree(hif);
return ret;
}
int wfx_hif_configuration(struct wfx_dev *wdev, const u8 *conf, size_t len)
{
int ret;
size_t buf_len = sizeof(struct wfx_hif_req_configuration) + len;
struct wfx_hif_msg *hif;
struct wfx_hif_req_configuration *body = wfx_alloc_hif(buf_len, &hif);
if (!hif)
return -ENOMEM;
body->length = cpu_to_le16(len);
memcpy(body->pds_data, conf, len);
wfx_fill_header(hif, -1, HIF_REQ_ID_CONFIGURATION, buf_len);
ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
kfree(hif);
return ret;
}
int wfx_hif_reset(struct wfx_vif *wvif, bool reset_stat)
{
int ret;
struct wfx_hif_msg *hif;
struct wfx_hif_req_reset *body = wfx_alloc_hif(sizeof(*body), &hif);
if (!hif)
return -ENOMEM;
body->reset_stat = reset_stat;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_RESET, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
return ret;
}
int wfx_hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val, size_t val_len)
{
int ret;
struct wfx_hif_msg *hif;
int buf_len = sizeof(struct wfx_hif_cnf_read_mib) + val_len;
struct wfx_hif_req_read_mib *body = wfx_alloc_hif(sizeof(*body), &hif);
struct wfx_hif_cnf_read_mib *reply = kmalloc(buf_len, GFP_KERNEL);
if (!body || !reply) {
ret = -ENOMEM;
goto out;
}
body->mib_id = cpu_to_le16(mib_id);
wfx_fill_header(hif, vif_id, HIF_REQ_ID_READ_MIB, sizeof(*body));
ret = wfx_cmd_send(wdev, hif, reply, buf_len, false);
if (!ret && mib_id != le16_to_cpu(reply->mib_id)) {
dev_warn(wdev->dev, "%s: confirmation mismatch request\n", __func__);
ret = -EIO;
}
if (ret == -ENOMEM)
dev_err(wdev->dev, "buffer is too small to receive %s (%zu < %d)\n",
wfx_get_mib_name(mib_id), val_len, le16_to_cpu(reply->length));
if (!ret)
memcpy(val, &reply->mib_data, le16_to_cpu(reply->length));
else
memset(val, 0xFF, val_len);
out:
kfree(hif);
kfree(reply);
return ret;
}
int wfx_hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val, size_t val_len)
{
int ret;
struct wfx_hif_msg *hif;
int buf_len = sizeof(struct wfx_hif_req_write_mib) + val_len;
struct wfx_hif_req_write_mib *body = wfx_alloc_hif(buf_len, &hif);
if (!hif)
return -ENOMEM;
body->mib_id = cpu_to_le16(mib_id);
body->length = cpu_to_le16(val_len);
memcpy(&body->mib_data, val, val_len);
wfx_fill_header(hif, vif_id, HIF_REQ_ID_WRITE_MIB, buf_len);
ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
kfree(hif);
return ret;
}
int wfx_hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req,
int chan_start_idx, int chan_num)
{
int ret, i;
struct wfx_hif_msg *hif;
size_t buf_len = sizeof(struct wfx_hif_req_start_scan_alt) + chan_num * sizeof(u8);
struct wfx_hif_req_start_scan_alt *body = wfx_alloc_hif(buf_len, &hif);
WARN(chan_num > HIF_API_MAX_NB_CHANNELS, "invalid params");
WARN(req->n_ssids > HIF_API_MAX_NB_SSIDS, "invalid params");
if (!hif)
return -ENOMEM;
for (i = 0; i < req->n_ssids; i++) {
memcpy(body->ssid_def[i].ssid, req->ssids[i].ssid, IEEE80211_MAX_SSID_LEN);
body->ssid_def[i].ssid_length = cpu_to_le32(req->ssids[i].ssid_len);
}
body->num_of_ssids = HIF_API_MAX_NB_SSIDS;
body->maintain_current_bss = 1;
body->disallow_ps = 1;
body->tx_power_level = cpu_to_le32(req->channels[chan_start_idx]->max_power);
body->num_of_channels = chan_num;
for (i = 0; i < chan_num; i++)
body->channel_list[i] = req->channels[i + chan_start_idx]->hw_value;
if (req->no_cck)
body->max_transmit_rate = API_RATE_INDEX_G_6MBPS;
else
body->max_transmit_rate = API_RATE_INDEX_B_1MBPS;
if (req->channels[chan_start_idx]->flags & IEEE80211_CHAN_NO_IR) {
body->min_channel_time = cpu_to_le32(50);
body->max_channel_time = cpu_to_le32(150);
} else {
body->min_channel_time = cpu_to_le32(10);
body->max_channel_time = cpu_to_le32(50);
body->num_of_probe_requests = 2;
body->probe_delay = 100;
}
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len);
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
return ret;
}
int wfx_hif_stop_scan(struct wfx_vif *wvif)
{
int ret;
struct wfx_hif_msg *hif;
/* body associated to HIF_REQ_ID_STOP_SCAN is empty */
wfx_alloc_hif(0, &hif);
if (!hif)
return -ENOMEM;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_STOP_SCAN, 0);
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
return ret;
}
int wfx_hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
struct ieee80211_channel *channel, const u8 *ssid, int ssid_len)
{
struct ieee80211_vif *vif = container_of(conf, struct ieee80211_vif,
bss_conf);
int ret;
struct wfx_hif_msg *hif;
struct wfx_hif_req_join *body = wfx_alloc_hif(sizeof(*body), &hif);
WARN_ON(!conf->beacon_int);
WARN_ON(!conf->basic_rates);
WARN_ON(sizeof(body->ssid) < ssid_len);
WARN(!vif->cfg.ibss_joined && !ssid_len, "joining an unknown BSS");
if (!hif)
return -ENOMEM;
body->infrastructure_bss_mode = !vif->cfg.ibss_joined;
body->short_preamble = conf->use_short_preamble;
body->probe_for_join = !(channel->flags & IEEE80211_CHAN_NO_IR);
body->channel_number = channel->hw_value;
body->beacon_interval = cpu_to_le32(conf->beacon_int);
body->basic_rate_set = cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
memcpy(body->bssid, conf->bssid, sizeof(body->bssid));
if (ssid) {
body->ssid_length = cpu_to_le32(ssid_len);
memcpy(body->ssid, ssid, ssid_len);
}
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_JOIN, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
return ret;
}
int wfx_hif_set_bss_params(struct wfx_vif *wvif, int aid, int beacon_lost_count)
{
int ret;
struct wfx_hif_msg *hif;
struct wfx_hif_req_set_bss_params *body = wfx_alloc_hif(sizeof(*body), &hif);
if (!hif)
return -ENOMEM;
body->aid = cpu_to_le16(aid);
body->beacon_lost_count = beacon_lost_count;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_BSS_PARAMS, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
return ret;
}
int wfx_hif_add_key(struct wfx_dev *wdev, const struct wfx_hif_req_add_key *arg)
{
int ret;
struct wfx_hif_msg *hif;
/* FIXME: only send necessary bits */
struct wfx_hif_req_add_key *body = wfx_alloc_hif(sizeof(*body), &hif);
if (!hif)
return -ENOMEM;
/* FIXME: swap bytes as necessary in body */
memcpy(body, arg, sizeof(*body));
if (wfx_api_older_than(wdev, 1, 5))
/* Legacy firmware expects add_key to be sent on the right interface. */
wfx_fill_header(hif, arg->int_id, HIF_REQ_ID_ADD_KEY, sizeof(*body));
else
wfx_fill_header(hif, -1, HIF_REQ_ID_ADD_KEY, sizeof(*body));
ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
kfree(hif);
return ret;
}
int wfx_hif_remove_key(struct wfx_dev *wdev, int idx)
{
int ret;
struct wfx_hif_msg *hif;
struct wfx_hif_req_remove_key *body = wfx_alloc_hif(sizeof(*body), &hif);
if (!hif)
return -ENOMEM;
body->entry_index = idx;
wfx_fill_header(hif, -1, HIF_REQ_ID_REMOVE_KEY, sizeof(*body));
ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
kfree(hif);
return ret;
}
int wfx_hif_set_edca_queue_params(struct wfx_vif *wvif, u16 queue,
const struct ieee80211_tx_queue_params *arg)
{
int ret;
struct wfx_hif_msg *hif;
struct wfx_hif_req_edca_queue_params *body = wfx_alloc_hif(sizeof(*body), &hif);
if (!hif)
return -ENOMEM;
WARN_ON(arg->aifs > 255);
body->aifsn = arg->aifs;
body->cw_min = cpu_to_le16(arg->cw_min);
body->cw_max = cpu_to_le16(arg->cw_max);
body->tx_op_limit = cpu_to_le16(arg->txop * USEC_PER_TXOP);
body->queue_id = 3 - queue;
/* API 2.0 changed the queue ID values */
if (wfx_api_older_than(wvif->wdev, 2, 0) && queue == IEEE80211_AC_BE)
body->queue_id = HIF_QUEUE_ID_BACKGROUND;
if (wfx_api_older_than(wvif->wdev, 2, 0) && queue == IEEE80211_AC_BK)
body->queue_id = HIF_QUEUE_ID_BESTEFFORT;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_EDCA_QUEUE_PARAMS, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
return ret;
}
int wfx_hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout)
{
int ret;
struct wfx_hif_msg *hif;
struct wfx_hif_req_set_pm_mode *body = wfx_alloc_hif(sizeof(*body), &hif);
if (!hif)
return -ENOMEM;
if (ps) {
body->enter_psm = 1;
/* Firmware does not support more than 128ms */
body->fast_psm_idle_period = min(dynamic_ps_timeout * 2, 255);
if (body->fast_psm_idle_period)
body->fast_psm = 1;
}
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_PM_MODE, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
return ret;
}
int wfx_hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
const struct ieee80211_channel *channel)
{
struct ieee80211_vif *vif = container_of(conf, struct ieee80211_vif,
bss_conf);
int ret;
struct wfx_hif_msg *hif;
struct wfx_hif_req_start *body = wfx_alloc_hif(sizeof(*body), &hif);
WARN_ON(!conf->beacon_int);
if (!hif)
return -ENOMEM;
body->dtim_period = conf->dtim_period;
body->short_preamble = conf->use_short_preamble;
body->channel_number = channel->hw_value;
body->beacon_interval = cpu_to_le32(conf->beacon_int);
body->basic_rate_set = cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
body->ssid_length = vif->cfg.ssid_len;
memcpy(body->ssid, vif->cfg.ssid, vif->cfg.ssid_len);
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
return ret;
}
int wfx_hif_beacon_transmit(struct wfx_vif *wvif, bool enable)
{
int ret;
struct wfx_hif_msg *hif;
struct wfx_hif_req_beacon_transmit *body = wfx_alloc_hif(sizeof(*body), &hif);
if (!hif)
return -ENOMEM;
body->enable_beaconing = enable ? 1 : 0;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_BEACON_TRANSMIT, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
return ret;
}
int wfx_hif_map_link(struct wfx_vif *wvif, bool unmap, u8 *mac_addr, int sta_id, bool mfp)
{
int ret;
struct wfx_hif_msg *hif;
struct wfx_hif_req_map_link *body = wfx_alloc_hif(sizeof(*body), &hif);
if (!hif)
return -ENOMEM;
if (mac_addr)
ether_addr_copy(body->mac_addr, mac_addr);
body->mfpc = mfp ? 1 : 0;
body->unmap = unmap ? 1 : 0;
body->peer_sta_id = sta_id;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_MAP_LINK, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
return ret;
}
int wfx_hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len)
{
int ret;
struct wfx_hif_msg *hif;
int buf_len = sizeof(struct wfx_hif_req_update_ie) + ies_len;
struct wfx_hif_req_update_ie *body = wfx_alloc_hif(buf_len, &hif);
if (!hif)
return -ENOMEM;
body->beacon = 1;
body->num_ies = cpu_to_le16(1);
memcpy(body->ie, ies, ies_len);
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_UPDATE_IE, buf_len);
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
return ret;
}
/* ===== end of drivers/net/wireless/silabs/wfx/hif_tx.c (linux-master) ===== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Firmware loading.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/bitfield.h>
#include "fwio.h"
#include "wfx.h"
#include "hwio.h"
/* Addresses below are in SRAM area */
#define WFX_DNLD_FIFO 0x09004000
#define DNLD_BLOCK_SIZE 0x0400
#define DNLD_FIFO_SIZE 0x8000 /* (32 * DNLD_BLOCK_SIZE) */
/* Download Control Area (DCA) */
#define WFX_DCA_IMAGE_SIZE 0x0900C000
#define WFX_DCA_PUT 0x0900C004
#define WFX_DCA_GET 0x0900C008
#define WFX_DCA_HOST_STATUS 0x0900C00C
#define HOST_READY 0x87654321
#define HOST_INFO_READ 0xA753BD99
#define HOST_UPLOAD_PENDING 0xABCDDCBA
#define HOST_UPLOAD_COMPLETE 0xD4C64A99
#define HOST_OK_TO_JUMP 0x174FC882
#define WFX_DCA_NCP_STATUS 0x0900C010
#define NCP_NOT_READY 0x12345678
#define NCP_READY 0x87654321
#define NCP_INFO_READY 0xBD53EF99
#define NCP_DOWNLOAD_PENDING 0xABCDDCBA
#define NCP_DOWNLOAD_COMPLETE 0xCAFEFECA
#define NCP_AUTH_OK 0xD4C64A99
#define NCP_AUTH_FAIL 0x174FC882
#define NCP_PUB_KEY_RDY 0x7AB41D19
#define WFX_DCA_FW_SIGNATURE 0x0900C014
#define FW_SIGNATURE_SIZE 0x40
#define WFX_DCA_FW_HASH 0x0900C054
#define FW_HASH_SIZE 0x08
#define WFX_DCA_FW_VERSION 0x0900C05C
#define FW_VERSION_SIZE 0x04
#define WFX_DCA_RESERVED 0x0900C060
#define DCA_RESERVED_SIZE 0x20
#define WFX_STATUS_INFO 0x0900C080
#define WFX_BOOTLOADER_LABEL 0x0900C084
#define BOOTLOADER_LABEL_SIZE 0x3C
#define WFX_PTE_INFO 0x0900C0C0
#define PTE_INFO_KEYSET_IDX 0x0D
#define PTE_INFO_SIZE 0x10
#define WFX_ERR_INFO 0x0900C0D0
#define ERR_INVALID_SEC_TYPE 0x05
#define ERR_SIG_VERIF_FAILED 0x0F
#define ERR_AES_CTRL_KEY 0x10
#define ERR_ECC_PUB_KEY 0x11
#define ERR_MAC_KEY 0x18
#define DCA_TIMEOUT 50 /* milliseconds */
#define WAKEUP_TIMEOUT 200 /* milliseconds */
static const char * const fwio_errors[] = {
[ERR_INVALID_SEC_TYPE] = "Invalid section type or wrong encryption",
[ERR_SIG_VERIF_FAILED] = "Signature verification failed",
[ERR_AES_CTRL_KEY] = "AES control key not initialized",
[ERR_ECC_PUB_KEY] = "ECC public key not initialized",
[ERR_MAC_KEY] = "MAC key not initialized",
};
/* request_firmware() allocates data using vmalloc(), which is not compatible with underlying
* hardware that uses DMA. The function below detects this case and allocates a bounce buffer if
* necessary.
*
* Note that, in doubt, you can enable CONFIG_DEBUG_SG to ask the kernel to detect this problem
* at runtime (otherwise, the kernel fails silently).
*
* NOTE: it may also be possible to use 'pages' from struct firmware and avoid the bounce buffer
*/
static int wfx_sram_write_dma_safe(struct wfx_dev *wdev, u32 addr, const u8 *buf, size_t len)
{
int ret;
const u8 *tmp;
if (!virt_addr_valid(buf)) {
tmp = kmemdup(buf, len, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
} else {
tmp = buf;
}
ret = wfx_sram_buf_write(wdev, addr, tmp, len);
if (tmp != buf)
kfree(tmp);
return ret;
}
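/* Retrieve the firmware matching the keyset reported by the chip. Firmware files may start with
* an 8-byte header: the ASCII string "KEYSET" followed by two hex digits encoding the keyset
* number; files without this header are assumed to use the legacy format (keyset 0x90).
*/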
static int get_firmware(struct wfx_dev *wdev, u32 keyset_chip,
const struct firmware **fw, int *file_offset)
{
int keyset_file;
char filename[256];
const char *data;
int ret;
snprintf(filename, sizeof(filename), "%s_%02X.sec",
wdev->pdata.file_fw, keyset_chip);
ret = firmware_request_nowarn(fw, filename, wdev->dev);
if (ret) {
dev_info(wdev->dev, "can't load %s, falling back to %s.sec\n",
filename, wdev->pdata.file_fw);
snprintf(filename, sizeof(filename), "%s.sec", wdev->pdata.file_fw);
ret = request_firmware(fw, filename, wdev->dev);
if (ret) {
dev_err(wdev->dev, "can't load %s\n", filename);
*fw = NULL;
return ret;
}
}
data = (*fw)->data;
if (memcmp(data, "KEYSET", 6) != 0) {
/* Legacy firmware format */
*file_offset = 0;
keyset_file = 0x90;
} else {
*file_offset = 8;
keyset_file = (hex_to_bin(data[6]) * 16) | hex_to_bin(data[7]);
if (keyset_file < 0) {
dev_err(wdev->dev, "%s corrupted\n", filename);
release_firmware(*fw);
*fw = NULL;
return -EINVAL;
}
}
if (keyset_file != keyset_chip) {
dev_err(wdev->dev, "firmware keyset is incompatible with chip (file: 0x%02X, chip: 0x%02X)\n",
keyset_file, keyset_chip);
release_firmware(*fw);
*fw = NULL;
return -ENODEV;
}
wdev->keyset = keyset_file;
return 0;
}
static int wait_ncp_status(struct wfx_dev *wdev, u32 status)
{
ktime_t now, start;
u32 reg;
int ret;
start = ktime_get();
for (;;) {
ret = wfx_sram_reg_read(wdev, WFX_DCA_NCP_STATUS, &reg);
if (ret < 0)
return -EIO;
now = ktime_get();
if (reg == status)
break;
if (ktime_after(now, ktime_add_ms(start, DCA_TIMEOUT)))
return -ETIMEDOUT;
}
if (ktime_compare(now, start))
dev_dbg(wdev->dev, "chip answered after %lldus\n", ktime_us_delta(now, start));
else
dev_dbg(wdev->dev, "chip answered immediately\n");
return 0;
}
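/* Stream the firmware image through the 32 KiB download FIFO, one 1 KiB block at a time.
* WFX_DCA_PUT (written by the host) and WFX_DCA_GET (advanced by the chip) act as ring buffer
* indices: the host stalls while more than DNLD_FIFO_SIZE bytes would be in flight.
*/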
static int upload_firmware(struct wfx_dev *wdev, const u8 *data, size_t len)
{
int ret;
u32 offs, bytes_done = 0;
ktime_t now, start;
if (len % DNLD_BLOCK_SIZE) {
dev_err(wdev->dev, "firmware size is not aligned. Buffer overrun will occur\n");
return -EIO;
}
offs = 0;
while (offs < len) {
start = ktime_get();
for (;;) {
now = ktime_get();
if (offs + DNLD_BLOCK_SIZE - bytes_done < DNLD_FIFO_SIZE)
break;
if (ktime_after(now, ktime_add_ms(start, DCA_TIMEOUT)))
return -ETIMEDOUT;
ret = wfx_sram_reg_read(wdev, WFX_DCA_GET, &bytes_done);
if (ret < 0)
return ret;
}
if (ktime_compare(now, start))
dev_dbg(wdev->dev, "chip answered after %lldus\n", ktime_us_delta(now, start));
ret = wfx_sram_write_dma_safe(wdev, WFX_DNLD_FIFO + (offs % DNLD_FIFO_SIZE),
data + offs, DNLD_BLOCK_SIZE);
if (ret < 0)
return ret;
/* The device does not seem to support writing 0 to this register during the first loop */
offs += DNLD_BLOCK_SIZE;
ret = wfx_sram_reg_write(wdev, WFX_DCA_PUT, offs);
if (ret < 0)
return ret;
}
return 0;
}
static void print_boot_status(struct wfx_dev *wdev)
{
u32 reg;
wfx_sram_reg_read(wdev, WFX_STATUS_INFO, &reg);
if (reg == 0x12345678)
return;
wfx_sram_reg_read(wdev, WFX_ERR_INFO, &reg);
if (reg < ARRAY_SIZE(fwio_errors) && fwio_errors[reg])
dev_info(wdev->dev, "secure boot: %s\n", fwio_errors[reg]);
else
dev_info(wdev->dev, "secure boot: Error %#02x\n", reg);
}
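/* Secure boot handshake, as implemented below: HOST_READY -> NCP_INFO_READY (read bootloader
* label and keyset) -> HOST_INFO_READ -> NCP_READY -> write signature, hash and image size ->
* HOST_UPLOAD_PENDING -> NCP_DOWNLOAD_PENDING -> stream the image -> HOST_UPLOAD_COMPLETE ->
* NCP_AUTH_OK (or NCP_PUB_KEY_RDY on legacy ROMs) -> HOST_OK_TO_JUMP.
*/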
static int load_firmware_secure(struct wfx_dev *wdev)
{
const struct firmware *fw = NULL;
int header_size;
int fw_offset;
ktime_t start;
u8 *buf;
int ret;
BUILD_BUG_ON(PTE_INFO_SIZE > BOOTLOADER_LABEL_SIZE);
buf = kmalloc(BOOTLOADER_LABEL_SIZE + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_READY);
ret = wait_ncp_status(wdev, NCP_INFO_READY);
if (ret)
goto error;
wfx_sram_buf_read(wdev, WFX_BOOTLOADER_LABEL, buf, BOOTLOADER_LABEL_SIZE);
buf[BOOTLOADER_LABEL_SIZE] = 0;
dev_dbg(wdev->dev, "bootloader: \"%s\"\n", buf);
wfx_sram_buf_read(wdev, WFX_PTE_INFO, buf, PTE_INFO_SIZE);
ret = get_firmware(wdev, buf[PTE_INFO_KEYSET_IDX], &fw, &fw_offset);
if (ret)
goto error;
header_size = fw_offset + FW_SIGNATURE_SIZE + FW_HASH_SIZE;
wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_INFO_READ);
ret = wait_ncp_status(wdev, NCP_READY);
if (ret)
goto error;
wfx_sram_reg_write(wdev, WFX_DNLD_FIFO, 0xFFFFFFFF); /* Fifo init */
wfx_sram_write_dma_safe(wdev, WFX_DCA_FW_VERSION, "\x01\x00\x00\x00", FW_VERSION_SIZE);
wfx_sram_write_dma_safe(wdev, WFX_DCA_FW_SIGNATURE, fw->data + fw_offset,
FW_SIGNATURE_SIZE);
wfx_sram_write_dma_safe(wdev, WFX_DCA_FW_HASH, fw->data + fw_offset + FW_SIGNATURE_SIZE,
FW_HASH_SIZE);
wfx_sram_reg_write(wdev, WFX_DCA_IMAGE_SIZE, fw->size - header_size);
wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_UPLOAD_PENDING);
ret = wait_ncp_status(wdev, NCP_DOWNLOAD_PENDING);
if (ret)
goto error;
start = ktime_get();
ret = upload_firmware(wdev, fw->data + header_size, fw->size - header_size);
if (ret)
goto error;
dev_dbg(wdev->dev, "firmware load after %lldus\n",
ktime_us_delta(ktime_get(), start));
wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_UPLOAD_COMPLETE);
ret = wait_ncp_status(wdev, NCP_AUTH_OK);
/* Legacy ROM support */
if (ret < 0)
ret = wait_ncp_status(wdev, NCP_PUB_KEY_RDY);
if (ret < 0)
goto error;
wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_OK_TO_JUMP);
error:
kfree(buf);
release_firmware(fw);
if (ret)
print_boot_status(wdev);
return ret;
}
static int init_gpr(struct wfx_dev *wdev)
{
int ret, i;
static const struct {
int index;
u32 value;
} gpr_init[] = {
{ 0x07, 0x208775 },
{ 0x08, 0x2EC020 },
{ 0x09, 0x3C3C3C },
{ 0x0B, 0x322C44 },
{ 0x0C, 0xA06497 },
};
for (i = 0; i < ARRAY_SIZE(gpr_init); i++) {
ret = wfx_igpr_reg_write(wdev, gpr_init[i].index, gpr_init[i].value);
if (ret < 0)
return ret;
dev_dbg(wdev->dev, " index %02x: %08x\n", gpr_init[i].index, gpr_init[i].value);
}
return 0;
}
int wfx_init_device(struct wfx_dev *wdev)
{
int ret;
int hw_revision, hw_type;
int wakeup_timeout = 50; /* ms */
ktime_t now, start;
u32 reg;
reg = CFG_DIRECT_ACCESS_MODE | CFG_CPU_RESET | CFG_BYTE_ORDER_ABCD;
if (wdev->pdata.use_rising_clk)
reg |= CFG_CLK_RISE_EDGE;
ret = wfx_config_reg_write(wdev, reg);
if (ret < 0) {
dev_err(wdev->dev, "bus returned an error during first write access. Host configuration error?\n");
return -EIO;
}
ret = wfx_config_reg_read(wdev, &reg);
if (ret < 0) {
dev_err(wdev->dev, "bus returned an error during first read access. Bus configuration error?\n");
return -EIO;
}
if (reg == 0 || reg == ~0) {
dev_err(wdev->dev, "chip mute. Bus configuration error or chip wasn't reset?\n");
return -EIO;
}
dev_dbg(wdev->dev, "initial config register value: %08x\n", reg);
hw_revision = FIELD_GET(CFG_DEVICE_ID_MAJOR, reg);
if (hw_revision == 0) {
dev_err(wdev->dev, "bad hardware revision number: %d\n", hw_revision);
return -ENODEV;
}
hw_type = FIELD_GET(CFG_DEVICE_ID_TYPE, reg);
if (hw_type == 1) {
dev_notice(wdev->dev, "development hardware detected\n");
wakeup_timeout = 2000;
}
ret = init_gpr(wdev);
if (ret < 0)
return ret;
ret = wfx_control_reg_write(wdev, CTRL_WLAN_WAKEUP);
if (ret < 0)
return -EIO;
start = ktime_get();
for (;;) {
ret = wfx_control_reg_read(wdev, &reg);
now = ktime_get();
if (reg & CTRL_WLAN_READY)
break;
if (ktime_after(now, ktime_add_ms(start, wakeup_timeout))) {
dev_err(wdev->dev, "chip didn't wake up. Chip wasn't reset?\n");
return -ETIMEDOUT;
}
}
dev_dbg(wdev->dev, "chip wake up after %lldus\n", ktime_us_delta(now, start));
ret = wfx_config_reg_write_bits(wdev, CFG_CPU_RESET, 0);
if (ret < 0)
return ret;
ret = load_firmware_secure(wdev);
if (ret < 0)
return ret;
return wfx_config_reg_write_bits(wdev,
CFG_DIRECT_ACCESS_MODE |
CFG_IRQ_ENABLE_DATA |
CFG_IRQ_ENABLE_WRDY,
CFG_IRQ_ENABLE_DATA);
}
/* ===== end of drivers/net/wireless/silabs/wfx/fwio.c (linux-master) ===== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Data receiving implementation.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "data_rx.h"
#include "wfx.h"
#include "bh.h"
#include "sta.h"
static void wfx_rx_handle_ba(struct wfx_vif *wvif, struct ieee80211_mgmt *mgmt)
{
struct ieee80211_vif *vif = wvif_to_vif(wvif);
int params, tid;
if (wfx_api_older_than(wvif->wdev, 3, 6))
return;
switch (mgmt->u.action.u.addba_req.action_code) {
case WLAN_ACTION_ADDBA_REQ:
params = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
tid = (params & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
ieee80211_start_rx_ba_session_offl(vif, mgmt->sa, tid);
break;
case WLAN_ACTION_DELBA:
params = le16_to_cpu(mgmt->u.action.u.delba.params);
tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12;
ieee80211_stop_rx_ba_session_offl(vif, mgmt->sa, tid);
break;
}
}
void wfx_rx_cb(struct wfx_vif *wvif, const struct wfx_hif_ind_rx *arg, struct sk_buff *skb)
{
struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data;
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
memset(hdr, 0, sizeof(*hdr));
if (arg->status == HIF_STATUS_RX_FAIL_MIC)
hdr->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_IV_STRIPPED;
else if (arg->status)
goto drop;
if (skb->len < sizeof(struct ieee80211_pspoll)) {
dev_warn(wvif->wdev->dev, "malformed SDU received\n");
goto drop;
}
hdr->band = NL80211_BAND_2GHZ;
hdr->freq = ieee80211_channel_to_frequency(arg->channel_number, hdr->band);
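/* Map the firmware rate index to mac80211's rate_idx: 0-3 are the 11b rates, 4-13 the 11g rates
* (shifted by 2 because hardware rate entries 4 and 5 are unused), and 14 and above are HT MCS
* indices.
*/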
if (arg->rxed_rate >= 14) {
hdr->encoding = RX_ENC_HT;
hdr->rate_idx = arg->rxed_rate - 14;
} else if (arg->rxed_rate >= 4) {
hdr->rate_idx = arg->rxed_rate - 2;
} else {
hdr->rate_idx = arg->rxed_rate;
}
if (!arg->rcpi_rssi) {
hdr->flag |= RX_FLAG_NO_SIGNAL_VAL;
dev_info(wvif->wdev->dev, "received frame without RSSI data\n");
}
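/* rcpi_rssi appears to encode the signal level in 0.5 dB steps relative to -110 dBm, hence the
* conversion below.
*/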
hdr->signal = arg->rcpi_rssi / 2 - 110;
hdr->antenna = 0;
if (arg->encryp)
hdr->flag |= RX_FLAG_DECRYPTED;
/* Block ack negotiation is offloaded by the firmware. However, re-ordering must be done by
* mac80211.
*/
if (ieee80211_is_action(frame->frame_control) &&
mgmt->u.action.category == WLAN_CATEGORY_BACK &&
skb->len > IEEE80211_MIN_ACTION_SIZE) {
wfx_rx_handle_ba(wvif, mgmt);
goto drop;
}
ieee80211_rx_irqsafe(wvif->wdev->hw, skb);
return;
drop:
dev_kfree_skb(skb);
}
/* ===== end of drivers/net/wireless/silabs/wfx/data_rx.c (linux-master) ===== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Scan related functions.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <net/mac80211.h>
#include "scan.h"
#include "wfx.h"
#include "sta.h"
#include "hif_tx_mib.h"
static void wfx_ieee80211_scan_completed_compat(struct ieee80211_hw *hw, bool aborted)
{
struct cfg80211_scan_info info = {
.aborted = aborted,
};
ieee80211_scan_completed(hw, &info);
}
static int update_probe_tmpl(struct wfx_vif *wvif, struct cfg80211_scan_request *req)
{
struct ieee80211_vif *vif = wvif_to_vif(wvif);
struct sk_buff *skb;
skb = ieee80211_probereq_get(wvif->wdev->hw, vif->addr, NULL, 0,
req->ie_len);
if (!skb)
return -ENOMEM;
skb_put_data(skb, req->ie, req->ie_len);
wfx_hif_set_template_frame(wvif, skb, HIF_TMPLT_PRBREQ, 0);
dev_kfree_skb(skb);
return 0;
}
static int send_scan_req(struct wfx_vif *wvif, struct cfg80211_scan_request *req, int start_idx)
{
struct ieee80211_vif *vif = wvif_to_vif(wvif);
struct ieee80211_channel *ch_start, *ch_cur;
int i, ret;
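/* Batch consecutive channels that share the same max_power and the same IEEE80211_CHAN_NO_IR
* setting, presumably because a single HIF scan request carries only one TX power level and one
* active/passive mode.
*/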
for (i = start_idx; i < req->n_channels; i++) {
ch_start = req->channels[start_idx];
ch_cur = req->channels[i];
WARN(ch_cur->band != NL80211_BAND_2GHZ, "band not supported");
if (ch_cur->max_power != ch_start->max_power)
break;
if ((ch_cur->flags ^ ch_start->flags) & IEEE80211_CHAN_NO_IR)
break;
}
wfx_tx_lock_flush(wvif->wdev);
wvif->scan_abort = false;
reinit_completion(&wvif->scan_complete);
ret = wfx_hif_scan(wvif, req, start_idx, i - start_idx);
if (ret) {
wfx_tx_unlock(wvif->wdev);
return -EIO;
}
ret = wait_for_completion_timeout(&wvif->scan_complete, 1 * HZ);
if (!ret) {
wfx_hif_stop_scan(wvif);
ret = wait_for_completion_timeout(&wvif->scan_complete, 1 * HZ);
dev_dbg(wvif->wdev->dev, "scan timeout (%d channels done)\n",
wvif->scan_nb_chan_done);
}
if (!ret) {
dev_err(wvif->wdev->dev, "scan didn't stop\n");
ret = -ETIMEDOUT;
} else if (wvif->scan_abort) {
dev_notice(wvif->wdev->dev, "scan abort\n");
ret = -ECONNABORTED;
} else if (wvif->scan_nb_chan_done > i - start_idx) {
ret = -EIO;
} else {
ret = wvif->scan_nb_chan_done;
}
if (req->channels[start_idx]->max_power != vif->bss_conf.txpower)
wfx_hif_set_output_power(wvif, vif->bss_conf.txpower);
wfx_tx_unlock(wvif->wdev);
return ret;
}
/* It is not really necessary to run the scan request asynchronously. However, there is a bug in
* "iw scan" when ieee80211_scan_completed() is called before wfx_hw_scan() returns
*/
void wfx_hw_scan_work(struct work_struct *work)
{
struct wfx_vif *wvif = container_of(work, struct wfx_vif, scan_work);
struct ieee80211_scan_request *hw_req = wvif->scan_req;
int chan_cur, ret, err;
mutex_lock(&wvif->wdev->conf_mutex);
mutex_lock(&wvif->scan_lock);
if (wvif->join_in_progress) {
dev_info(wvif->wdev->dev, "abort in-progress REQ_JOIN");
wfx_reset(wvif);
}
update_probe_tmpl(wvif, &hw_req->req);
chan_cur = 0;
err = 0;
do {
ret = send_scan_req(wvif, &hw_req->req, chan_cur);
if (ret > 0) {
chan_cur += ret;
err = 0;
}
if (!ret)
err++;
if (err > 2) {
dev_err(wvif->wdev->dev, "scan has not been able to start\n");
ret = -ETIMEDOUT;
}
} while (ret >= 0 && chan_cur < hw_req->req.n_channels);
mutex_unlock(&wvif->scan_lock);
mutex_unlock(&wvif->wdev->conf_mutex);
wfx_ieee80211_scan_completed_compat(wvif->wdev->hw, ret < 0);
}
int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
{
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
WARN_ON(hw_req->req.n_channels > HIF_API_MAX_NB_CHANNELS);
wvif->scan_req = hw_req;
schedule_work(&wvif->scan_work);
return 0;
}
void wfx_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
wvif->scan_abort = true;
wfx_hif_stop_scan(wvif);
}
void wfx_scan_complete(struct wfx_vif *wvif, int nb_chan_done)
{
wvif->scan_nb_chan_done = nb_chan_done;
complete(&wvif->scan_complete);
}
/* ===== end of drivers/net/wireless/silabs/wfx/scan.c (linux-master) ===== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Queue between the tx operation and the bh workqueue.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/sched.h>
#include <net/mac80211.h>
#include "queue.h"
#include "wfx.h"
#include "sta.h"
#include "data_tx.h"
#include "traces.h"
void wfx_tx_lock(struct wfx_dev *wdev)
{
atomic_inc(&wdev->tx_lock);
}
void wfx_tx_unlock(struct wfx_dev *wdev)
{
int tx_lock = atomic_dec_return(&wdev->tx_lock);
WARN(tx_lock < 0, "inconsistent tx_lock value");
if (!tx_lock)
wfx_bh_request_tx(wdev);
}
void wfx_tx_flush(struct wfx_dev *wdev)
{
int ret;
/* Do not wait for any reply if chip is frozen */
if (wdev->chip_frozen)
return;
wfx_tx_lock(wdev);
mutex_lock(&wdev->hif_cmd.lock);
ret = wait_event_timeout(wdev->hif.tx_buffers_empty, !wdev->hif.tx_buffers_used,
msecs_to_jiffies(3000));
if (!ret) {
dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n",
wdev->hif.tx_buffers_used);
wfx_pending_dump_old_frames(wdev, 3000);
/* FIXME: drop pending frames here */
wdev->chip_frozen = true;
}
mutex_unlock(&wdev->hif_cmd.lock);
wfx_tx_unlock(wdev);
}
void wfx_tx_lock_flush(struct wfx_dev *wdev)
{
wfx_tx_lock(wdev);
wfx_tx_flush(wdev);
}
void wfx_tx_queues_init(struct wfx_vif *wvif)
{
/* The device is in charge of respecting the details of the QoS parameters. The driver just
* ensures that it roughly respects the priorities to avoid any starvation.
*/
const int priorities[IEEE80211_NUM_ACS] = { 1, 2, 64, 128 };
int i;
for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
skb_queue_head_init(&wvif->tx_queue[i].normal);
skb_queue_head_init(&wvif->tx_queue[i].cab);
wvif->tx_queue[i].priority = priorities[i];
}
}
bool wfx_tx_queue_empty(struct wfx_vif *wvif, struct wfx_queue *queue)
{
return skb_queue_empty_lockless(&queue->normal) && skb_queue_empty_lockless(&queue->cab);
}
void wfx_tx_queues_check_empty(struct wfx_vif *wvif)
{
int i;
for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
WARN_ON(atomic_read(&wvif->tx_queue[i].pending_frames));
WARN_ON(!wfx_tx_queue_empty(wvif, &wvif->tx_queue[i]));
}
}
static void __wfx_tx_queue_drop(struct wfx_vif *wvif,
struct sk_buff_head *skb_queue, struct sk_buff_head *dropped)
{
struct sk_buff *skb, *tmp;
spin_lock_bh(&skb_queue->lock);
skb_queue_walk_safe(skb_queue, skb, tmp) {
__skb_unlink(skb, skb_queue);
skb_queue_head(dropped, skb);
}
spin_unlock_bh(&skb_queue->lock);
}
void wfx_tx_queue_drop(struct wfx_vif *wvif, struct wfx_queue *queue,
struct sk_buff_head *dropped)
{
__wfx_tx_queue_drop(wvif, &queue->cab, dropped);
__wfx_tx_queue_drop(wvif, &queue->normal, dropped);
wake_up(&wvif->wdev->tx_dequeue);
}
void wfx_tx_queues_put(struct wfx_vif *wvif, struct sk_buff *skb)
{
struct wfx_queue *queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
skb_queue_tail(&queue->cab, skb);
else
skb_queue_tail(&queue->normal, skb);
}
void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped)
{
struct wfx_queue *queue;
struct wfx_vif *wvif;
struct wfx_hif_msg *hif;
struct sk_buff *skb;
WARN(!wdev->chip_frozen, "%s should only be used to recover a frozen device", __func__);
while ((skb = skb_dequeue(&wdev->tx_pending)) != NULL) {
hif = (struct wfx_hif_msg *)skb->data;
wvif = wdev_to_wvif(wdev, hif->interface);
if (wvif) {
queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
WARN_ON(skb_get_queue_mapping(skb) > 3);
WARN_ON(!atomic_read(&queue->pending_frames));
atomic_dec(&queue->pending_frames);
}
skb_queue_head(dropped, skb);
}
}
struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
{
struct wfx_queue *queue;
struct wfx_hif_req_tx *req;
struct wfx_vif *wvif;
struct wfx_hif_msg *hif;
struct sk_buff *skb;
spin_lock_bh(&wdev->tx_pending.lock);
skb_queue_walk(&wdev->tx_pending, skb) {
hif = (struct wfx_hif_msg *)skb->data;
req = (struct wfx_hif_req_tx *)hif->body;
if (req->packet_id != packet_id)
continue;
spin_unlock_bh(&wdev->tx_pending.lock);
wvif = wdev_to_wvif(wdev, hif->interface);
if (wvif) {
queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
WARN_ON(skb_get_queue_mapping(skb) > 3);
WARN_ON(!atomic_read(&queue->pending_frames));
atomic_dec(&queue->pending_frames);
}
skb_unlink(skb, &wdev->tx_pending);
return skb;
}
spin_unlock_bh(&wdev->tx_pending.lock);
WARN(1, "cannot find packet in pending queue");
return NULL;
}
void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
{
ktime_t now = ktime_get();
struct wfx_tx_priv *tx_priv;
struct wfx_hif_req_tx *req;
struct sk_buff *skb;
bool first = true;
spin_lock_bh(&wdev->tx_pending.lock);
skb_queue_walk(&wdev->tx_pending, skb) {
tx_priv = wfx_skb_tx_priv(skb);
req = wfx_skb_txreq(skb);
if (ktime_after(now, ktime_add_ms(tx_priv->xmit_timestamp, limit_ms))) {
if (first) {
dev_info(wdev->dev, "frames stuck in firmware since %dms or more:\n",
limit_ms);
first = false;
}
dev_info(wdev->dev, " id %08x sent %lldms ago\n",
req->packet_id, ktime_ms_delta(now, tx_priv->xmit_timestamp));
}
}
spin_unlock_bh(&wdev->tx_pending.lock);
}
unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev, struct sk_buff *skb)
{
ktime_t now = ktime_get();
struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
return ktime_us_delta(now, tx_priv->xmit_timestamp);
}
bool wfx_tx_queues_has_cab(struct wfx_vif *wvif)
{
struct ieee80211_vif *vif = wvif_to_vif(wvif);
int i;
if (vif->type != NL80211_IFTYPE_AP)
return false;
for (i = 0; i < IEEE80211_NUM_ACS; ++i)
/* Note: since only an AP can have mcast frames in its queues and only one vif can be an AP,
* all queued frames have the same interface id
*/
if (!skb_queue_empty_lockless(&wvif->tx_queue[i].cab))
return true;
return false;
}
static int wfx_tx_queue_get_weight(struct wfx_queue *queue)
{
return atomic_read(&queue->pending_frames) * queue->priority;
}
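/* Queues are served in order of increasing weight (pending frames times priority), so a queue
* with few in-flight frames and a small priority value is picked first. Multicast-after-DTIM
* (cab) frames take precedence over normal ones.
*/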
static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
{
struct wfx_queue *queues[IEEE80211_NUM_ACS * ARRAY_SIZE(wdev->vif)];
int i, j, num_queues = 0;
struct wfx_vif *wvif;
struct wfx_hif_msg *hif;
struct sk_buff *skb;
/* sort the queues */
wvif = NULL;
while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
WARN_ON(num_queues >= ARRAY_SIZE(queues));
queues[num_queues] = &wvif->tx_queue[i];
for (j = num_queues; j > 0; j--)
if (wfx_tx_queue_get_weight(queues[j]) <
wfx_tx_queue_get_weight(queues[j - 1]))
swap(queues[j - 1], queues[j]);
num_queues++;
}
}
wvif = NULL;
while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
if (!wvif->after_dtim_tx_allowed)
continue;
for (i = 0; i < num_queues; i++) {
skb = skb_dequeue(&queues[i]->cab);
if (!skb)
continue;
/* Note: since only an AP can have mcast frames in its queues and only one vif can be an AP,
* all queued frames have the same interface id
*/
hif = (struct wfx_hif_msg *)skb->data;
WARN_ON(hif->interface != wvif->id);
WARN_ON(queues[i] != &wvif->tx_queue[skb_get_queue_mapping(skb)]);
atomic_inc(&queues[i]->pending_frames);
trace_queues_stats(wdev, queues[i]);
return skb;
}
/* No more multicast frames to send */
wvif->after_dtim_tx_allowed = false;
schedule_work(&wvif->update_tim_work);
}
for (i = 0; i < num_queues; i++) {
skb = skb_dequeue(&queues[i]->normal);
if (skb) {
atomic_inc(&queues[i]->pending_frames);
trace_queues_stats(wdev, queues[i]);
return skb;
}
}
return NULL;
}
struct wfx_hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
{
struct wfx_tx_priv *tx_priv;
struct sk_buff *skb;
if (atomic_read(&wdev->tx_lock))
return NULL;
skb = wfx_tx_queues_get_skb(wdev);
if (!skb)
return NULL;
skb_queue_tail(&wdev->tx_pending, skb);
wake_up(&wdev->tx_dequeue);
tx_priv = wfx_skb_tx_priv(skb);
tx_priv->xmit_timestamp = ktime_get();
return (struct wfx_hif_msg *)skb->data;
}
/* ===== end of drivers/net/wireless/silabs/wfx/queue.c (linux-master) ===== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Data transmitting implementation.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include "data_tx.h"
#include "wfx.h"
#include "bh.h"
#include "sta.h"
#include "queue.h"
#include "debug.h"
#include "traces.h"
#include "hif_tx_mib.h"
static int wfx_get_hw_rate(struct wfx_dev *wdev, const struct ieee80211_tx_rate *rate)
{
struct ieee80211_supported_band *band;
if (rate->idx < 0)
return -1;
if (rate->flags & IEEE80211_TX_RC_MCS) {
if (rate->idx > 7) {
WARN(1, "wrong rate->idx value: %d", rate->idx);
return -1;
}
return rate->idx + 14;
}
/* The device only supports 2GHz; otherwise, the band information should be retrieved from
* ieee80211_tx_info
*/
band = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ];
if (rate->idx >= band->n_bitrates) {
WARN(1, "wrong rate->idx value: %d", rate->idx);
return -1;
}
return band->bitrates[rate->idx].hw_value;
}
/* TX policy cache implementation */
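/* A TX policy is a retry table: a 4-bit retry count per hardware rate index, two counts packed
* in each byte of policy->rates. Policies are reference-counted and uploaded to the firmware
* lazily; a frame only carries the index of its policy in this cache.
*/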
static void wfx_tx_policy_build(struct wfx_vif *wvif, struct wfx_tx_policy *policy,
struct ieee80211_tx_rate *rates)
{
struct wfx_dev *wdev = wvif->wdev;
int i, rateid;
u8 count;
WARN(rates[0].idx < 0, "invalid rate policy");
memset(policy, 0, sizeof(*policy));
for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) {
if (rates[i].idx < 0)
break;
WARN_ON(rates[i].count > 15);
rateid = wfx_get_hw_rate(wdev, &rates[i]);
/* Pack two values in each byte of policy->rates */
count = rates[i].count;
if (rateid % 2)
count <<= 4;
policy->rates[rateid / 2] |= count;
}
}
static bool wfx_tx_policy_is_equal(const struct wfx_tx_policy *a, const struct wfx_tx_policy *b)
{
return !memcmp(a->rates, b->rates, sizeof(a->rates));
}
static int wfx_tx_policy_find(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *wanted)
{
struct wfx_tx_policy *it;
list_for_each_entry(it, &cache->used, link)
if (wfx_tx_policy_is_equal(wanted, it))
return it - cache->cache;
list_for_each_entry(it, &cache->free, link)
if (wfx_tx_policy_is_equal(wanted, it))
return it - cache->cache;
return -1;
}
static void wfx_tx_policy_use(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *entry)
{
++entry->usage_count;
list_move(&entry->link, &cache->used);
}
static int wfx_tx_policy_release(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *entry)
{
int ret = --entry->usage_count;
if (!ret)
list_move(&entry->link, &cache->free);
return ret;
}
static int wfx_tx_policy_get(struct wfx_vif *wvif, struct ieee80211_tx_rate *rates, bool *renew)
{
int idx;
struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache;
struct wfx_tx_policy wanted;
struct wfx_tx_policy *entry;
wfx_tx_policy_build(wvif, &wanted, rates);
spin_lock_bh(&cache->lock);
if (list_empty(&cache->free)) {
WARN(1, "unable to get a valid Tx policy");
spin_unlock_bh(&cache->lock);
return HIF_TX_RETRY_POLICY_INVALID;
}
idx = wfx_tx_policy_find(cache, &wanted);
if (idx >= 0) {
*renew = false;
} else {
/* If the policy is not found, create a new one using the oldest entry in the "free" list */
*renew = true;
entry = list_entry(cache->free.prev, struct wfx_tx_policy, link);
memcpy(entry->rates, wanted.rates, sizeof(entry->rates));
entry->uploaded = false;
entry->usage_count = 0;
idx = entry - cache->cache;
}
wfx_tx_policy_use(cache, &cache->cache[idx]);
if (list_empty(&cache->free))
ieee80211_stop_queues(wvif->wdev->hw);
spin_unlock_bh(&cache->lock);
return idx;
}
static void wfx_tx_policy_put(struct wfx_vif *wvif, int idx)
{
int usage, locked;
struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache;
if (idx == HIF_TX_RETRY_POLICY_INVALID)
return;
spin_lock_bh(&cache->lock);
locked = list_empty(&cache->free);
usage = wfx_tx_policy_release(cache, &cache->cache[idx]);
if (locked && !usage)
ieee80211_wake_queues(wvif->wdev->hw);
spin_unlock_bh(&cache->lock);
}
static int wfx_tx_policy_upload(struct wfx_vif *wvif)
{
struct wfx_tx_policy *policies = wvif->tx_policy_cache.cache;
u8 tmp_rates[12];
int i, is_used;
do {
spin_lock_bh(&wvif->tx_policy_cache.lock);
for (i = 0; i < ARRAY_SIZE(wvif->tx_policy_cache.cache); ++i) {
is_used = memzcmp(policies[i].rates, sizeof(policies[i].rates));
if (!policies[i].uploaded && is_used)
break;
}
if (i < ARRAY_SIZE(wvif->tx_policy_cache.cache)) {
policies[i].uploaded = true;
memcpy(tmp_rates, policies[i].rates, sizeof(tmp_rates));
spin_unlock_bh(&wvif->tx_policy_cache.lock);
wfx_hif_set_tx_rate_retry_policy(wvif, i, tmp_rates);
} else {
spin_unlock_bh(&wvif->tx_policy_cache.lock);
}
} while (i < ARRAY_SIZE(wvif->tx_policy_cache.cache));
return 0;
}
void wfx_tx_policy_upload_work(struct work_struct *work)
{
struct wfx_vif *wvif = container_of(work, struct wfx_vif, tx_policy_upload_work);
wfx_tx_policy_upload(wvif);
wfx_tx_unlock(wvif->wdev);
}
void wfx_tx_policy_init(struct wfx_vif *wvif)
{
struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache;
int i;
memset(cache, 0, sizeof(*cache));
spin_lock_init(&cache->lock);
INIT_LIST_HEAD(&cache->used);
INIT_LIST_HEAD(&cache->free);
for (i = 0; i < ARRAY_SIZE(cache->cache); ++i)
list_add(&cache->cache[i].link, &cache->free);
}
/* Tx implementation */
static bool wfx_is_action_back(struct ieee80211_hdr *hdr)
{
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr;
if (!ieee80211_is_action(mgmt->frame_control))
return false;
if (mgmt->u.action.category != WLAN_CATEGORY_BACK)
return false;
return true;
}
static u8 wfx_tx_get_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta,
struct ieee80211_hdr *hdr)
{
struct wfx_sta_priv *sta_priv = sta ? (struct wfx_sta_priv *)&sta->drv_priv : NULL;
struct ieee80211_vif *vif = wvif_to_vif(wvif);
const u8 *da = ieee80211_get_DA(hdr);
if (sta_priv && sta_priv->link_id)
return sta_priv->link_id;
if (vif->type != NL80211_IFTYPE_AP)
return 0;
if (is_multicast_ether_addr(da))
return 0;
return HIF_LINK_ID_NOT_ASSOCIATED;
}
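/* Mangle the rate list computed by minstrel so that it fits the firmware retry table: uniform
* flags, descending rate order, no duplicates, at most 15 retries per rate, and a final fallback
* entry at the lowest rate.
*/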
static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
{
int i;
bool finished;
/* Firmware is not able to mix rates with different flags */
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
rates[i].flags |= IEEE80211_TX_RC_SHORT_GI;
if (!(rates[0].flags & IEEE80211_TX_RC_SHORT_GI))
rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
if (!(rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS))
rates[i].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
}
/* Sort rates and remove duplicates */
do {
finished = true;
for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) {
if (rates[i + 1].idx == rates[i].idx &&
rates[i].idx != -1) {
rates[i].count += rates[i + 1].count;
if (rates[i].count > 15)
rates[i].count = 15;
rates[i + 1].idx = -1;
rates[i + 1].count = 0;
finished = false;
}
if (rates[i + 1].idx > rates[i].idx) {
swap(rates[i + 1], rates[i]);
finished = false;
}
}
} while (!finished);
/* Ensure that MCS0 or 1Mbps is present at the end of the retry list */
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
if (rates[i].idx == 0)
break;
if (rates[i].idx == -1) {
rates[i].idx = 0;
rates[i].count = 8; /* == hw->max_rate_tries */
rates[i].flags = rates[i - 1].flags & IEEE80211_TX_RC_MCS;
break;
}
}
/* All retries use long GI */
for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
}
static u8 wfx_tx_get_retry_policy_id(struct wfx_vif *wvif, struct ieee80211_tx_info *tx_info)
{
bool tx_policy_renew = false;
u8 ret;
ret = wfx_tx_policy_get(wvif, tx_info->driver_rates, &tx_policy_renew);
if (ret == HIF_TX_RETRY_POLICY_INVALID)
dev_warn(wvif->wdev->dev, "unable to get a valid Tx policy");
if (tx_policy_renew) {
wfx_tx_lock(wvif->wdev);
if (!schedule_work(&wvif->tx_policy_upload_work))
wfx_tx_unlock(wvif->wdev);
}
return ret;
}
static int wfx_tx_get_frame_format(struct ieee80211_tx_info *tx_info)
{
if (!(tx_info->driver_rates[0].flags & IEEE80211_TX_RC_MCS))
return HIF_FRAME_FORMAT_NON_HT;
else if (!(tx_info->driver_rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD))
return HIF_FRAME_FORMAT_MIXED_FORMAT_HT;
else
return HIF_FRAME_FORMAT_GF_HT_11N;
}
static int wfx_tx_get_icv_len(struct ieee80211_key_conf *hw_key)
{
int mic_space;
if (!hw_key)
return 0;
if (hw_key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
return 0;
mic_space = (hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) ? 8 : 0;
return hw_key->icv_len + mic_space;
}
static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta, struct sk_buff *skb)
{
struct wfx_hif_msg *hif_msg;
struct wfx_hif_req_tx *req;
struct wfx_tx_priv *tx_priv;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
int queue_id = skb_get_queue_mapping(skb);
size_t offset = (size_t)skb->data & 3;
int wmsg_len = sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_req_tx) + offset;
WARN(queue_id >= IEEE80211_NUM_ACS, "unsupported queue_id");
wfx_tx_fixup_rates(tx_info->driver_rates);
/* From now on, tx_info->control is unusable */
memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv));
/* Fill tx_priv */
tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data;
tx_priv->icv_size = wfx_tx_get_icv_len(hw_key);
/* Fill hif_msg */
WARN(skb_headroom(skb) < wmsg_len, "not enough space in skb");
WARN(offset & 1, "attempt to transmit an unaligned frame");
skb_put(skb, tx_priv->icv_size);
skb_push(skb, wmsg_len);
memset(skb->data, 0, wmsg_len);
hif_msg = (struct wfx_hif_msg *)skb->data;
hif_msg->len = cpu_to_le16(skb->len);
hif_msg->id = HIF_REQ_ID_TX;
hif_msg->interface = wvif->id;
if (skb->len > le16_to_cpu(wvif->wdev->hw_caps.size_inp_ch_buf)) {
dev_warn(wvif->wdev->dev,
"requested frame size (%d) is larger than maximum supported (%d)\n",
skb->len, le16_to_cpu(wvif->wdev->hw_caps.size_inp_ch_buf));
skb_pull(skb, wmsg_len);
return -EIO;
}
/* Fill tx request */
req = (struct wfx_hif_req_tx *)hif_msg->body;
/* The packet_id just needs to be unique on the device. 32 bits are more than necessary for that
* task, so we take advantage of it to add some extra data for debug: bits 0-15 hold a running
* counter, bits 16-27 the IEEE sequence number and bits 28-31 the queue id.
*/
req->packet_id = atomic_add_return(1, &wvif->wdev->packet_id) & 0xFFFF;
req->packet_id |= IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)) << 16;
req->packet_id |= queue_id << 28;
req->fc_offset = offset;
/* Queue indices are inverted between the firmware and Linux */
req->queue_id = 3 - queue_id;
req->peer_sta_id = wfx_tx_get_link_id(wvif, sta, hdr);
req->retry_policy_index = wfx_tx_get_retry_policy_id(wvif, tx_info);
req->frame_format = wfx_tx_get_frame_format(tx_info);
if (tx_info->driver_rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
req->short_gi = 1;
if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
req->after_dtim = 1;
/* Auxiliary operations */
wfx_tx_queues_put(wvif, skb);
if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
schedule_work(&wvif->update_tim_work);
wfx_bh_request_tx(wvif->wdev);
return 0;
}
void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb)
{
struct wfx_dev *wdev = hw->priv;
struct wfx_vif *wvif;
struct ieee80211_sta *sta = control ? control->sta : NULL;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
size_t driver_data_room = sizeof_field(struct ieee80211_tx_info, rate_driver_data);
BUILD_BUG_ON_MSG(sizeof(struct wfx_tx_priv) > driver_data_room,
"struct tx_priv is too large");
WARN(skb->next || skb->prev, "skb is already member of a list");
/* control.vif can be NULL for injected frames */
if (tx_info->control.vif)
wvif = (struct wfx_vif *)tx_info->control.vif->drv_priv;
else
wvif = wvif_iterate(wdev, NULL);
if (WARN_ON(!wvif))
goto drop;
/* Because of TX_AMPDU_SETUP_IN_HW, mac80211 does not try to send any BlockAck session
* management frame. The check below exists just in case.
*/
if (wfx_is_action_back(hdr)) {
dev_info(wdev->dev, "drop BA action\n");
goto drop;
}
if (wfx_tx_inner(wvif, sta, skb))
goto drop;
return;
drop:
ieee80211_tx_status_irqsafe(wdev->hw, skb);
}
static void wfx_skb_dtor(struct wfx_vif *wvif, struct sk_buff *skb)
{
struct wfx_hif_msg *hif = (struct wfx_hif_msg *)skb->data;
struct wfx_hif_req_tx *req = (struct wfx_hif_req_tx *)hif->body;
unsigned int offset = sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_req_tx) +
req->fc_offset;
if (!wvif) {
pr_warn("vif associated with the skb does not exist anymore\n");
return;
}
wfx_tx_policy_put(wvif, req->retry_policy_index);
skb_pull(skb, offset);
ieee80211_tx_status_irqsafe(wvif->wdev->hw, skb);
}
static void wfx_tx_fill_rates(struct wfx_dev *wdev, struct ieee80211_tx_info *tx_info,
const struct wfx_hif_cnf_tx *arg)
{
struct ieee80211_tx_rate *rate;
int tx_count;
int i;
tx_count = arg->ack_failures;
if (!arg->status || arg->ack_failures)
tx_count += 1; /* Also report success */
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
rate = &tx_info->status.rates[i];
if (rate->idx < 0)
break;
if (tx_count < rate->count && arg->status == HIF_STATUS_TX_FAIL_RETRIES &&
arg->ack_failures)
dev_dbg(wdev->dev, "all retries were not consumed: %d != %d\n",
rate->count, tx_count);
if (tx_count <= rate->count && tx_count &&
arg->txed_rate != wfx_get_hw_rate(wdev, rate))
dev_dbg(wdev->dev, "inconsistent tx_info rates: %d != %d\n",
arg->txed_rate, wfx_get_hw_rate(wdev, rate));
if (tx_count > rate->count) {
tx_count -= rate->count;
} else if (!tx_count) {
rate->count = 0;
rate->idx = -1;
} else {
rate->count = tx_count;
tx_count = 0;
}
}
if (tx_count)
dev_dbg(wdev->dev, "%d more retries than expected\n", tx_count);
}
void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct wfx_hif_cnf_tx *arg)
{
const struct wfx_tx_priv *tx_priv;
struct ieee80211_tx_info *tx_info;
struct wfx_vif *wvif;
struct sk_buff *skb;
skb = wfx_pending_get(wdev, arg->packet_id);
if (!skb) {
dev_warn(wdev->dev, "received unknown packet_id (%#.8x) from chip\n",
arg->packet_id);
return;
}
tx_info = IEEE80211_SKB_CB(skb);
tx_priv = wfx_skb_tx_priv(skb);
wvif = wdev_to_wvif(wdev, ((struct wfx_hif_msg *)skb->data)->interface);
WARN_ON(!wvif);
if (!wvif)
return;
/* Note that wfx_pending_get_pkt_us_delay() gets its data from tx_info */
_trace_tx_stats(arg, skb, wfx_pending_get_pkt_us_delay(wdev, skb));
wfx_tx_fill_rates(wdev, tx_info, arg);
skb_trim(skb, skb->len - tx_priv->icv_size);
/* From now on, you can touch tx_info->status, but do not touch tx_priv anymore */
/* FIXME: use ieee80211_tx_info_clear_status() */
memset(tx_info->rate_driver_data, 0, sizeof(tx_info->rate_driver_data));
memset(tx_info->pad, 0, sizeof(tx_info->pad));
if (!arg->status) {
tx_info->status.tx_time = le32_to_cpu(arg->media_delay) -
le32_to_cpu(arg->tx_queue_delay);
if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
tx_info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
else
tx_info->flags |= IEEE80211_TX_STAT_ACK;
} else if (arg->status == HIF_STATUS_TX_FAIL_REQUEUE) {
WARN(!arg->requeue, "incoherent status and result_flags");
if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
wvif->after_dtim_tx_allowed = false; /* DTIM period elapsed */
schedule_work(&wvif->update_tim_work);
}
tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
}
wfx_skb_dtor(wvif, skb);
}
static void wfx_flush_vif(struct wfx_vif *wvif, u32 queues, struct sk_buff_head *dropped)
{
struct wfx_queue *queue;
int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
if (!(BIT(i) & queues))
continue;
queue = &wvif->tx_queue[i];
if (dropped)
wfx_tx_queue_drop(wvif, queue, dropped);
}
if (wvif->wdev->chip_frozen)
return;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
if (!(BIT(i) & queues))
continue;
queue = &wvif->tx_queue[i];
if (wait_event_timeout(wvif->wdev->tx_dequeue, wfx_tx_queue_empty(wvif, queue),
msecs_to_jiffies(1000)) <= 0)
dev_warn(wvif->wdev->dev, "frames queued while flushing tx queues?");
}
}
void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop)
{
struct wfx_dev *wdev = hw->priv;
struct sk_buff_head dropped;
struct wfx_vif *wvif;
struct wfx_hif_msg *hif;
struct sk_buff *skb;
skb_queue_head_init(&dropped);
if (vif) {
wvif = (struct wfx_vif *)vif->drv_priv;
wfx_flush_vif(wvif, queues, drop ? &dropped : NULL);
} else {
wvif = NULL;
while ((wvif = wvif_iterate(wdev, wvif)) != NULL)
wfx_flush_vif(wvif, queues, drop ? &dropped : NULL);
}
wfx_tx_flush(wdev);
if (wdev->chip_frozen)
wfx_pending_drop(wdev, &dropped);
while ((skb = skb_dequeue(&dropped)) != NULL) {
hif = (struct wfx_hif_msg *)skb->data;
wvif = wdev_to_wvif(wdev, hif->interface);
ieee80211_tx_info_clear_status(IEEE80211_SKB_CB(skb));
wfx_skb_dtor(wvif, skb);
}
}
/* ===== end of drivers/net/wireless/silabs/wfx/data_tx.c (linux-master) ===== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Debugfs interface.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/crc32.h>
#include "debug.h"
#include "wfx.h"
#include "sta.h"
#include "main.h"
#include "hif_tx.h"
#include "hif_tx_mib.h"
#define CREATE_TRACE_POINTS
#include "traces.h"
static const struct trace_print_flags hif_msg_print_map[] = {
hif_msg_list,
};
static const struct trace_print_flags hif_mib_print_map[] = {
hif_mib_list,
};
static const struct trace_print_flags wfx_reg_print_map[] = {
wfx_reg_list,
};
static const char *get_symbol(unsigned long val, const struct trace_print_flags *symbol_array)
{
int i;
for (i = 0; symbol_array[i].mask != -1; i++) {
if (val == symbol_array[i].mask)
return symbol_array[i].name;
}
return "unknown";
}
const char *wfx_get_hif_name(unsigned long id)
{
return get_symbol(id, hif_msg_print_map);
}
const char *wfx_get_mib_name(unsigned long id)
{
return get_symbol(id, hif_mib_print_map);
}
const char *wfx_get_reg_name(unsigned long id)
{
return get_symbol(id, wfx_reg_print_map);
}
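/* The firmware maintains three instances of the extended counter table: one per interface and,
* presumably, an aggregate at index 2 (displayed as "global" below).
*/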
static int wfx_counters_show(struct seq_file *seq, void *v)
{
int ret, i;
struct wfx_dev *wdev = seq->private;
struct wfx_hif_mib_extended_count_table counters[3];
for (i = 0; i < ARRAY_SIZE(counters); i++) {
ret = wfx_hif_get_counters_table(wdev, i, counters + i);
if (ret < 0)
return ret;
if (ret > 0)
return -EIO;
}
seq_printf(seq, "%-24s %12s %12s %12s\n", "", "global", "iface 0", "iface 1");
#define PUT_COUNTER(name) \
seq_printf(seq, "%-24s %12d %12d %12d\n", #name, \
le32_to_cpu(counters[2].count_##name), \
le32_to_cpu(counters[0].count_##name), \
le32_to_cpu(counters[1].count_##name))
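/* Index 2 is assumed to hold the cumulative counters, hence the "global" column
* printed first above.
*/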
PUT_COUNTER(tx_frames);
PUT_COUNTER(tx_frames_multicast);
PUT_COUNTER(tx_frames_success);
PUT_COUNTER(tx_frames_retried);
PUT_COUNTER(tx_frames_multi_retried);
PUT_COUNTER(tx_frames_failed);
PUT_COUNTER(ack_failed);
PUT_COUNTER(rts_success);
PUT_COUNTER(rts_failed);
PUT_COUNTER(rx_frames);
PUT_COUNTER(rx_frames_multicast);
PUT_COUNTER(rx_frames_success);
PUT_COUNTER(rx_frames_failed);
PUT_COUNTER(drop_plcp);
PUT_COUNTER(drop_fcs);
PUT_COUNTER(drop_no_key);
PUT_COUNTER(drop_decryption);
PUT_COUNTER(drop_tkip_mic);
PUT_COUNTER(drop_bip_mic);
PUT_COUNTER(drop_cmac_icv);
PUT_COUNTER(drop_cmac_replay);
PUT_COUNTER(drop_ccmp_replay);
PUT_COUNTER(drop_duplicate);
PUT_COUNTER(rx_bcn_miss);
PUT_COUNTER(rx_bcn_success);
PUT_COUNTER(rx_bcn_dtim);
PUT_COUNTER(rx_bcn_dtim_aid0_clr);
PUT_COUNTER(rx_bcn_dtim_aid0_set);
#undef PUT_COUNTER
for (i = 0; i < ARRAY_SIZE(counters[0].reserved); i++)
seq_printf(seq, "reserved[%02d]%12s %12d %12d %12d\n", i, "",
le32_to_cpu(counters[2].reserved[i]),
le32_to_cpu(counters[0].reserved[i]),
le32_to_cpu(counters[1].reserved[i]));
return 0;
}
DEFINE_SHOW_ATTRIBUTE(wfx_counters);
static const char * const channel_names[] = {
[0] = "1M",
[1] = "2M",
[2] = "5.5M",
[3] = "11M",
/* Entries 4 and 5 do not exist */
[6] = "6M",
[7] = "9M",
[8] = "12M",
[9] = "18M",
[10] = "24M",
[11] = "36M",
[12] = "48M",
[13] = "54M",
[14] = "MCS0",
[15] = "MCS1",
[16] = "MCS2",
[17] = "MCS3",
[18] = "MCS4",
[19] = "MCS5",
[20] = "MCS6",
[21] = "MCS7",
};
static int wfx_rx_stats_show(struct seq_file *seq, void *v)
{
struct wfx_dev *wdev = seq->private;
struct wfx_hif_rx_stats *st = &wdev->rx_stats;
int i;
mutex_lock(&wdev->rx_stats_lock);
seq_printf(seq, "Timestamp: %dus\n", st->date);
seq_printf(seq, "Low power clock: frequency %uHz, external %s\n",
le32_to_cpu(st->pwr_clk_freq), st->is_ext_pwr_clk ? "yes" : "no");
seq_printf(seq, "Num. of frames: %d, PER (x10e4): %d, Throughput: %dKbps/s\n",
st->nb_rx_frame, st->per_total, st->throughput);
seq_puts(seq, " Num. of PER RSSI SNR CFO\n");
seq_puts(seq, " frames (x10e4) (dBm) (dB) (kHz)\n");
for (i = 0; i < ARRAY_SIZE(channel_names); i++) {
if (channel_names[i])
seq_printf(seq, "%5s %8d %8d %8d %8d %8d\n",
channel_names[i],
le32_to_cpu(st->nb_rx_by_rate[i]),
le16_to_cpu(st->per[i]),
(s16)le16_to_cpu(st->rssi[i]) / 100,
(s16)le16_to_cpu(st->snr[i]) / 100,
(s16)le16_to_cpu(st->cfo[i]));
}
mutex_unlock(&wdev->rx_stats_lock);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(wfx_rx_stats);
static int wfx_tx_power_loop_show(struct seq_file *seq, void *v)
{
struct wfx_dev *wdev = seq->private;
struct wfx_hif_tx_power_loop_info *st = &wdev->tx_power_loop_info;
int tmp;
mutex_lock(&wdev->tx_power_loop_info_lock);
tmp = le16_to_cpu(st->tx_gain_dig);
seq_printf(seq, "Tx gain digital: %d\n", tmp);
tmp = le16_to_cpu(st->tx_gain_pa);
seq_printf(seq, "Tx gain PA: %d\n", tmp);
tmp = (s16)le16_to_cpu(st->target_pout);
seq_printf(seq, "Target Pout: %d.%02d dBm\n", tmp / 4, (tmp % 4) * 25);
tmp = (s16)le16_to_cpu(st->p_estimation);
seq_printf(seq, "FEM Pout: %d.%02d dBm\n", tmp / 4, (tmp % 4) * 25);
tmp = le16_to_cpu(st->vpdet);
seq_printf(seq, "Vpdet: %d mV\n", tmp);
seq_printf(seq, "Measure index: %d\n", st->measurement_index);
mutex_unlock(&wdev->tx_power_loop_info_lock);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(wfx_tx_power_loop);
static ssize_t wfx_send_pds_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wfx_dev *wdev = file->private_data;
char *buf;
int ret;
if (*ppos != 0) {
dev_dbg(wdev->dev, "PDS data must be written in one transaction");
return -EBUSY;
}
buf = memdup_user(user_buf, count);
if (IS_ERR(buf))
return PTR_ERR(buf);
*ppos = *ppos + count;
ret = wfx_send_pds(wdev, buf, count);
kfree(buf);
if (ret < 0)
return ret;
return count;
}
static const struct file_operations wfx_send_pds_fops = {
.open = simple_open,
.write = wfx_send_pds_write,
};
struct dbgfs_hif_msg {
struct wfx_dev *wdev;
struct completion complete;
u8 reply[1024];
int ret;
};
static ssize_t wfx_send_hif_msg_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct dbgfs_hif_msg *context = file->private_data;
struct wfx_dev *wdev = context->wdev;
struct wfx_hif_msg *request;
if (completion_done(&context->complete)) {
dev_dbg(wdev->dev, "read previous result before start a new one\n");
return -EBUSY;
}
if (count < sizeof(struct wfx_hif_msg))
return -EINVAL;
/* wfx_cmd_send() checks that the reply buffer is wide enough, but does not return the
* precise length read. The user has to know how many bytes should be read. Filling the
* reply buffer with a memory pattern may help the user.
*/
memset(context->reply, 0xFF, sizeof(context->reply));
request = memdup_user(user_buf, count);
if (IS_ERR(request))
return PTR_ERR(request);
if (le16_to_cpu(request->len) != count) {
kfree(request);
return -EINVAL;
}
context->ret = wfx_cmd_send(wdev, request, context->reply, sizeof(context->reply), false);
kfree(request);
complete(&context->complete);
return count;
}
static ssize_t wfx_send_hif_msg_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct dbgfs_hif_msg *context = file->private_data;
int ret;
if (count > sizeof(context->reply))
return -EINVAL;
ret = wait_for_completion_interruptible(&context->complete);
if (ret)
return ret;
if (context->ret < 0)
return context->ret;
/* Be careful: write() expects a full HIF message while read() only returns the payload */
if (copy_to_user(user_buf, context->reply, count))
return -EFAULT;
return count;
}
static int wfx_send_hif_msg_open(struct inode *inode, struct file *file)
{
struct dbgfs_hif_msg *context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
context->wdev = inode->i_private;
init_completion(&context->complete);
file->private_data = context;
return 0;
}
static int wfx_send_hif_msg_release(struct inode *inode, struct file *file)
{
struct dbgfs_hif_msg *context = file->private_data;
kfree(context);
return 0;
}
static const struct file_operations wfx_send_hif_msg_fops = {
.open = wfx_send_hif_msg_open,
.release = wfx_send_hif_msg_release,
.write = wfx_send_hif_msg_write,
.read = wfx_send_hif_msg_read,
};
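/* Expected exchange with this debugfs node (a sketch derived from the handlers
* above): write() one complete HIF message -- little-endian header (len, id,
* interface) followed by the body, with len equal to the number of bytes
* written -- then read() back up to 1024 bytes of reply payload.
*/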
int wfx_debug_init(struct wfx_dev *wdev)
{
struct dentry *d;
d = debugfs_create_dir("wfx", wdev->hw->wiphy->debugfsdir);
debugfs_create_file("counters", 0444, d, wdev, &wfx_counters_fops);
debugfs_create_file("rx_stats", 0444, d, wdev, &wfx_rx_stats_fops);
debugfs_create_file("tx_power_loop", 0444, d, wdev, &wfx_tx_power_loop_fops);
debugfs_create_file("send_pds", 0200, d, wdev, &wfx_send_pds_fops);
debugfs_create_file("send_hif_msg", 0600, d, wdev, &wfx_send_hif_msg_fops);
return 0;
}
|
linux-master
|
drivers/net/wireless/silabs/wfx/debug.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Device probe and register.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
* Copyright (c) 2008, Johannes Berg <[email protected]>
* Copyright (c) 2008 Nokia Corporation and/or its subsidiary(-ies).
* Copyright (c) 2007-2009, Christian Lamparter <[email protected]>
* Copyright (c) 2006, Michael Wu <[email protected]>
* Copyright (c) 2004-2006 Jean-Baptiste Note <[email protected]>, et al.
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/gpio/consumer.h>
#include <linux/mmc/sdio_func.h>
#include <linux/spi/spi.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include "main.h"
#include "wfx.h"
#include "fwio.h"
#include "hwio.h"
#include "bus.h"
#include "bh.h"
#include "sta.h"
#include "key.h"
#include "scan.h"
#include "debug.h"
#include "data_tx.h"
#include "hif_tx_mib.h"
#include "hif_api_cmd.h"
#define WFX_PDS_TLV_TYPE 0x4450 // "PD" (Platform Data) in ASCII, little-endian
#define WFX_PDS_MAX_CHUNK_SIZE 1500
MODULE_DESCRIPTION("Silicon Labs 802.11 Wireless LAN driver for WF200");
MODULE_AUTHOR("Jérôme Pouiller <[email protected]>");
MODULE_LICENSE("GPL");
#define RATETAB_ENT(_rate, _rateid, _flags) { \
.bitrate = (_rate), \
.hw_value = (_rateid), \
.flags = (_flags), \
}
static struct ieee80211_rate wfx_rates[] = {
RATETAB_ENT(10, 0, 0),
RATETAB_ENT(20, 1, IEEE80211_RATE_SHORT_PREAMBLE),
RATETAB_ENT(55, 2, IEEE80211_RATE_SHORT_PREAMBLE),
RATETAB_ENT(110, 3, IEEE80211_RATE_SHORT_PREAMBLE),
RATETAB_ENT(60, 6, 0),
RATETAB_ENT(90, 7, 0),
RATETAB_ENT(120, 8, 0),
RATETAB_ENT(180, 9, 0),
RATETAB_ENT(240, 10, 0),
RATETAB_ENT(360, 11, 0),
RATETAB_ENT(480, 12, 0),
RATETAB_ENT(540, 13, 0),
};
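/* Note: mac80211 expresses bitrates in units of 100 kbps, so the entries above span
* 1 Mbps to 54 Mbps; hw_value is the rate index understood by the chip (cf.
* channel_names[] in debug.c).
*/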
#define CHAN2G(_channel, _freq, _flags) { \
.band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_channel), \
.flags = (_flags), \
.max_antenna_gain = 0, \
.max_power = 30, \
}
static struct ieee80211_channel wfx_2ghz_chantable[] = {
CHAN2G(1, 2412, 0),
CHAN2G(2, 2417, 0),
CHAN2G(3, 2422, 0),
CHAN2G(4, 2427, 0),
CHAN2G(5, 2432, 0),
CHAN2G(6, 2437, 0),
CHAN2G(7, 2442, 0),
CHAN2G(8, 2447, 0),
CHAN2G(9, 2452, 0),
CHAN2G(10, 2457, 0),
CHAN2G(11, 2462, 0),
CHAN2G(12, 2467, 0),
CHAN2G(13, 2472, 0),
CHAN2G(14, 2484, 0),
};
static const struct ieee80211_supported_band wfx_band_2ghz = {
.channels = wfx_2ghz_chantable,
.n_channels = ARRAY_SIZE(wfx_2ghz_chantable),
.bitrates = wfx_rates,
.n_bitrates = ARRAY_SIZE(wfx_rates),
.ht_cap = {
/* Receive caps */
.cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_MAX_AMSDU | (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT),
.ht_supported = 1,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE,
.mcs = {
.rx_mask = { 0xFF }, /* MCS0 to MCS7 */
.rx_highest = cpu_to_le16(72),
.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
},
},
};
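/* The rx_highest value of 72 above matches MCS7 on a 20 MHz channel with a short
* guard interval (72.2 Mbps), the best this single-stream design can receive.
*/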
static const struct ieee80211_iface_limit wdev_iface_limits[] = {
{ .max = 1, .types = BIT(NL80211_IFTYPE_STATION) },
{ .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
};
static const struct ieee80211_iface_combination wfx_iface_combinations[] = {
{
.num_different_channels = 2,
.max_interfaces = 2,
.limits = wdev_iface_limits,
.n_limits = ARRAY_SIZE(wdev_iface_limits),
}
};
static const struct ieee80211_ops wfx_ops = {
.start = wfx_start,
.stop = wfx_stop,
.add_interface = wfx_add_interface,
.remove_interface = wfx_remove_interface,
.config = wfx_config,
.tx = wfx_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.join_ibss = wfx_join_ibss,
.leave_ibss = wfx_leave_ibss,
.conf_tx = wfx_conf_tx,
.hw_scan = wfx_hw_scan,
.cancel_hw_scan = wfx_cancel_hw_scan,
.start_ap = wfx_start_ap,
.stop_ap = wfx_stop_ap,
.sta_add = wfx_sta_add,
.sta_remove = wfx_sta_remove,
.set_tim = wfx_set_tim,
.set_key = wfx_set_key,
.set_rts_threshold = wfx_set_rts_threshold,
.set_default_unicast_key = wfx_set_default_unicast_key,
.bss_info_changed = wfx_bss_info_changed,
.configure_filter = wfx_configure_filter,
.ampdu_action = wfx_ampdu_action,
.flush = wfx_flush,
.add_chanctx = wfx_add_chanctx,
.remove_chanctx = wfx_remove_chanctx,
.change_chanctx = wfx_change_chanctx,
.assign_vif_chanctx = wfx_assign_vif_chanctx,
.unassign_vif_chanctx = wfx_unassign_vif_chanctx,
};
bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor)
{
if (wdev->hw_caps.api_version_major < major)
return true;
if (wdev->hw_caps.api_version_major > major)
return false;
if (wdev->hw_caps.api_version_minor < minor)
return true;
return false;
}
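/* For example, wfx_api_older_than(wdev, 3, 8) is true for a firmware reporting
* API 3.7 and false for one reporting 3.8 or 4.0 (this gates the TDLS flag in
* wfx_probe()).
*/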
/* The device needs data about the antenna configuration. This information is provided by PDS
* (Platform Data Set, the wording used in the WF200 documentation) files. For hardware
* integrators, the full process to create PDS files is described here:
* https://github.com/SiliconLabs/wfx-firmware/blob/master/PDS/README.md
*
* The PDS file is an array of Type-Length-Value structs.
*/
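/* Chunk layout, as parsed below (illustrative):
*   offset 0: __le16 type  -- WFX_PDS_TLV_TYPE, 0x4450 ("PD")
*   offset 2: __le16 len   -- length of the whole chunk, header included
*   offset 4: payload      -- compressed PDS data, "{...}"
*/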
int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len)
{
int ret, chunk_type, chunk_len, chunk_num = 0;
if (*buf == '{') {
dev_err(wdev->dev, "PDS: malformed file (legacy format?)\n");
return -EINVAL;
}
while (len > 0) {
chunk_type = get_unaligned_le16(buf + 0);
chunk_len = get_unaligned_le16(buf + 2);
if (chunk_len < 4 || chunk_len > len) {
dev_err(wdev->dev, "PDS:%d: corrupted file\n", chunk_num);
return -EINVAL;
}
if (chunk_type != WFX_PDS_TLV_TYPE) {
dev_info(wdev->dev, "PDS:%d: skip unknown data\n", chunk_num);
goto next;
}
if (chunk_len > WFX_PDS_MAX_CHUNK_SIZE)
dev_warn(wdev->dev, "PDS:%d: unexpectedly large chunk\n", chunk_num);
if (buf[4] != '{' || buf[chunk_len - 1] != '}')
dev_warn(wdev->dev, "PDS:%d: unexpected content\n", chunk_num);
ret = wfx_hif_configuration(wdev, buf + 4, chunk_len - 4);
if (ret > 0) {
dev_err(wdev->dev, "PDS:%d: invalid data (unsupported options?)\n", chunk_num);
return -EINVAL;
}
if (ret == -ETIMEDOUT) {
dev_err(wdev->dev, "PDS:%d: chip didn't reply (corrupted file?)\n", chunk_num);
return ret;
}
if (ret) {
dev_err(wdev->dev, "PDS:%d: chip returned an unknown error\n", chunk_num);
return -EIO;
}
next:
chunk_num++;
len -= chunk_len;
buf += chunk_len;
}
return 0;
}
static int wfx_send_pdata_pds(struct wfx_dev *wdev)
{
int ret = 0;
const struct firmware *pds;
u8 *tmp_buf;
ret = request_firmware(&pds, wdev->pdata.file_pds, wdev->dev);
if (ret) {
dev_err(wdev->dev, "can't load antenna parameters (PDS file %s). The device may be unstable.\n",
wdev->pdata.file_pds);
return ret;
}
tmp_buf = kmemdup(pds->data, pds->size, GFP_KERNEL);
if (!tmp_buf) {
ret = -ENOMEM;
goto release_fw;
}
ret = wfx_send_pds(wdev, tmp_buf, pds->size);
kfree(tmp_buf);
release_fw:
release_firmware(pds);
return ret;
}
static void wfx_free_common(void *data)
{
struct wfx_dev *wdev = data;
mutex_destroy(&wdev->tx_power_loop_info_lock);
mutex_destroy(&wdev->rx_stats_lock);
mutex_destroy(&wdev->conf_mutex);
ieee80211_free_hw(wdev->hw);
}
struct wfx_dev *wfx_init_common(struct device *dev, const struct wfx_platform_data *pdata,
const struct wfx_hwbus_ops *hwbus_ops, void *hwbus_priv)
{
struct ieee80211_hw *hw;
struct wfx_dev *wdev;
hw = ieee80211_alloc_hw(sizeof(struct wfx_dev), &wfx_ops);
if (!hw)
return NULL;
SET_IEEE80211_DEV(hw, dev);
ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, CONNECTION_MONITOR);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, MFP_CAPABLE);
hw->vif_data_size = sizeof(struct wfx_vif);
hw->sta_data_size = sizeof(struct wfx_sta_priv);
hw->queues = 4;
hw->max_rates = 8;
hw->max_rate_tries = 8;
hw->extra_tx_headroom = sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_req_tx) +
4 /* alignment */ + 8 /* TKIP IV */;
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP);
hw->wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U;
hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
hw->wiphy->max_ap_assoc_sta = HIF_LINK_ID_MAX;
hw->wiphy->max_scan_ssids = 2;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
hw->wiphy->n_iface_combinations = ARRAY_SIZE(wfx_iface_combinations);
hw->wiphy->iface_combinations = wfx_iface_combinations;
/* FIXME: also copy wfx_rates and wfx_2ghz_chantable */
hw->wiphy->bands[NL80211_BAND_2GHZ] = devm_kmemdup(dev, &wfx_band_2ghz,
sizeof(wfx_band_2ghz), GFP_KERNEL);
if (!hw->wiphy->bands[NL80211_BAND_2GHZ])
goto err;
wdev = hw->priv;
wdev->hw = hw;
wdev->dev = dev;
wdev->hwbus_ops = hwbus_ops;
wdev->hwbus_priv = hwbus_priv;
memcpy(&wdev->pdata, pdata, sizeof(*pdata));
of_property_read_string(dev->of_node, "silabs,antenna-config-file", &wdev->pdata.file_pds);
wdev->pdata.gpio_wakeup = devm_gpiod_get_optional(dev, "wakeup", GPIOD_OUT_LOW);
if (IS_ERR(wdev->pdata.gpio_wakeup))
goto err;
if (wdev->pdata.gpio_wakeup)
gpiod_set_consumer_name(wdev->pdata.gpio_wakeup, "wfx wakeup");
mutex_init(&wdev->conf_mutex);
mutex_init(&wdev->rx_stats_lock);
mutex_init(&wdev->tx_power_loop_info_lock);
init_completion(&wdev->firmware_ready);
INIT_DELAYED_WORK(&wdev->cooling_timeout_work, wfx_cooling_timeout_work);
skb_queue_head_init(&wdev->tx_pending);
init_waitqueue_head(&wdev->tx_dequeue);
wfx_init_hif_cmd(&wdev->hif_cmd);
if (devm_add_action_or_reset(dev, wfx_free_common, wdev))
return NULL;
return wdev;
err:
ieee80211_free_hw(hw);
return NULL;
}
int wfx_probe(struct wfx_dev *wdev)
{
int i;
int err;
struct gpio_desc *gpio_saved;
/* During the first part of boot, gpio_wakeup cannot be used yet, so prevent bh() from
* touching it.
*/
gpio_saved = wdev->pdata.gpio_wakeup;
wdev->pdata.gpio_wakeup = NULL;
wdev->poll_irq = true;
wdev->bh_wq = alloc_workqueue("wfx_bh_wq", WQ_HIGHPRI, 0);
if (!wdev->bh_wq)
return -ENOMEM;
wfx_bh_register(wdev);
err = wfx_init_device(wdev);
if (err)
goto bh_unregister;
wfx_bh_poll_irq(wdev);
err = wait_for_completion_timeout(&wdev->firmware_ready, 1 * HZ);
if (err == 0) {
dev_err(wdev->dev, "timeout while waiting for startup indication\n");
err = -ETIMEDOUT;
goto bh_unregister;
}
/* FIXME: fill wiphy::hw_version */
dev_info(wdev->dev, "started firmware %d.%d.%d \"%s\" (API: %d.%d, keyset: %02X, caps: 0x%.8X)\n",
wdev->hw_caps.firmware_major, wdev->hw_caps.firmware_minor,
wdev->hw_caps.firmware_build, wdev->hw_caps.firmware_label,
wdev->hw_caps.api_version_major, wdev->hw_caps.api_version_minor,
wdev->keyset, wdev->hw_caps.link_mode);
snprintf(wdev->hw->wiphy->fw_version,
sizeof(wdev->hw->wiphy->fw_version),
"%d.%d.%d",
wdev->hw_caps.firmware_major,
wdev->hw_caps.firmware_minor,
wdev->hw_caps.firmware_build);
if (wfx_api_older_than(wdev, 1, 0)) {
dev_err(wdev->dev, "unsupported firmware API version (expect 1 while firmware returns %d)\n",
wdev->hw_caps.api_version_major);
err = -EOPNOTSUPP;
goto bh_unregister;
}
if (wdev->hw_caps.link_mode == SEC_LINK_ENFORCED) {
dev_err(wdev->dev, "chip requires secure_link, but the driver can't negotiate it\n");
err = -ENODEV; /* err would otherwise keep the positive timeout value */
goto bh_unregister;
}
if (wdev->hw_caps.region_sel_mode) {
wdev->hw->wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS;
wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[11].flags |=
IEEE80211_CHAN_NO_IR;
wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[12].flags |=
IEEE80211_CHAN_NO_IR;
wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[13].flags |=
IEEE80211_CHAN_DISABLED;
}
dev_dbg(wdev->dev, "sending configuration file %s\n", wdev->pdata.file_pds);
err = wfx_send_pdata_pds(wdev);
if (err < 0 && err != -ENOENT)
goto bh_unregister;
wdev->poll_irq = false;
err = wdev->hwbus_ops->irq_subscribe(wdev->hwbus_priv);
if (err)
goto bh_unregister;
err = wfx_hif_use_multi_tx_conf(wdev, true);
if (err)
dev_err(wdev->dev, "misconfigured IRQ?\n");
wdev->pdata.gpio_wakeup = gpio_saved;
if (wdev->pdata.gpio_wakeup) {
dev_dbg(wdev->dev, "enable 'quiescent' power mode with wakeup GPIO and PDS file %s\n",
wdev->pdata.file_pds);
gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1);
wfx_control_reg_write(wdev, 0);
wfx_hif_set_operational_mode(wdev, HIF_OP_POWER_MODE_QUIESCENT);
} else {
wfx_hif_set_operational_mode(wdev, HIF_OP_POWER_MODE_DOZE);
}
for (i = 0; i < ARRAY_SIZE(wdev->addresses); i++) {
eth_zero_addr(wdev->addresses[i].addr);
err = of_get_mac_address(wdev->dev->of_node, wdev->addresses[i].addr);
if (!err)
wdev->addresses[i].addr[ETH_ALEN - 1] += i;
else
ether_addr_copy(wdev->addresses[i].addr, wdev->hw_caps.mac_addr[i]);
if (!is_valid_ether_addr(wdev->addresses[i].addr)) {
dev_warn(wdev->dev, "using random MAC address\n");
eth_random_addr(wdev->addresses[i].addr);
}
dev_info(wdev->dev, "MAC address %d: %pM\n", i, wdev->addresses[i].addr);
}
wdev->hw->wiphy->n_addresses = ARRAY_SIZE(wdev->addresses);
wdev->hw->wiphy->addresses = wdev->addresses;
if (!wfx_api_older_than(wdev, 3, 8))
wdev->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
err = ieee80211_register_hw(wdev->hw);
if (err)
goto irq_unsubscribe;
err = wfx_debug_init(wdev);
if (err)
goto ieee80211_unregister;
return 0;
ieee80211_unregister:
ieee80211_unregister_hw(wdev->hw);
irq_unsubscribe:
wdev->hwbus_ops->irq_unsubscribe(wdev->hwbus_priv);
bh_unregister:
wfx_bh_unregister(wdev);
destroy_workqueue(wdev->bh_wq);
return err;
}
void wfx_release(struct wfx_dev *wdev)
{
ieee80211_unregister_hw(wdev->hw);
wfx_hif_shutdown(wdev);
wdev->hwbus_ops->irq_unsubscribe(wdev->hwbus_priv);
wfx_bh_unregister(wdev);
destroy_workqueue(wdev->bh_wq);
}
static int __init wfx_core_init(void)
{
int ret = 0;
if (IS_ENABLED(CONFIG_SPI))
ret = spi_register_driver(&wfx_spi_driver);
if (IS_ENABLED(CONFIG_MMC) && !ret)
ret = sdio_register_driver(&wfx_sdio_driver);
return ret;
}
module_init(wfx_core_init);
static void __exit wfx_core_exit(void)
{
if (IS_ENABLED(CONFIG_MMC))
sdio_unregister_driver(&wfx_sdio_driver);
if (IS_ENABLED(CONFIG_SPI))
spi_unregister_driver(&wfx_spi_driver);
}
module_exit(wfx_core_exit);
|
linux-master
|
drivers/net/wireless/silabs/wfx/main.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* SDIO interface.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/module.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irq.h>
#include <linux/align.h>
#include "bus.h"
#include "wfx.h"
#include "hwio.h"
#include "main.h"
#include "bh.h"
static const struct wfx_platform_data pdata_wf200 = {
.file_fw = "wfx/wfm_wf200",
.file_pds = "wfx/wf200.pds",
};
static const struct wfx_platform_data pdata_brd4001a = {
.file_fw = "wfx/wfm_wf200",
.file_pds = "wfx/brd4001a.pds",
};
static const struct wfx_platform_data pdata_brd8022a = {
.file_fw = "wfx/wfm_wf200",
.file_pds = "wfx/brd8022a.pds",
};
static const struct wfx_platform_data pdata_brd8023a = {
.file_fw = "wfx/wfm_wf200",
.file_pds = "wfx/brd8023a.pds",
};
struct wfx_sdio_priv {
struct sdio_func *func;
struct wfx_dev *core;
u8 buf_id_tx;
u8 buf_id_rx;
int of_irq;
};
static int wfx_sdio_copy_from_io(void *priv, unsigned int reg_id, void *dst, size_t count)
{
struct wfx_sdio_priv *bus = priv;
unsigned int sdio_addr = reg_id << 2;
int ret;
WARN(reg_id > 7, "chip only has 7 registers");
WARN(!IS_ALIGNED((uintptr_t)dst, 4), "unaligned buffer address");
WARN(!IS_ALIGNED(count, 4), "unaligned buffer size");
/* Use queue mode buffers */
if (reg_id == WFX_REG_IN_OUT_QUEUE)
sdio_addr |= (bus->buf_id_rx + 1) << 7;
ret = sdio_memcpy_fromio(bus->func, dst, sdio_addr, count);
if (!ret && reg_id == WFX_REG_IN_OUT_QUEUE)
bus->buf_id_rx = (bus->buf_id_rx + 1) % 4;
return ret;
}
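/* Note on the address encoding above and below (an assumption drawn from this file,
* not from a datasheet): the register ID sits in address bits 2..4 and the queue
* buffer index in bits 7 and up; cycling the buffer index on each successful queue
* access presumably lets the chip detect lost or repeated transfers.
*/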
static int wfx_sdio_copy_to_io(void *priv, unsigned int reg_id, const void *src, size_t count)
{
struct wfx_sdio_priv *bus = priv;
unsigned int sdio_addr = reg_id << 2;
int ret;
WARN(reg_id > 7, "chip only has 7 registers");
WARN(!IS_ALIGNED((uintptr_t)src, 4), "unaligned buffer address");
WARN(!IS_ALIGNED(count, 4), "unaligned buffer size");
/* Use queue mode buffers */
if (reg_id == WFX_REG_IN_OUT_QUEUE)
sdio_addr |= bus->buf_id_tx << 7;
/* FIXME: discards 'const' qualifier for src */
ret = sdio_memcpy_toio(bus->func, sdio_addr, (void *)src, count);
if (!ret && reg_id == WFX_REG_IN_OUT_QUEUE)
bus->buf_id_tx = (bus->buf_id_tx + 1) % 32;
return ret;
}
static void wfx_sdio_lock(void *priv)
{
struct wfx_sdio_priv *bus = priv;
sdio_claim_host(bus->func);
}
static void wfx_sdio_unlock(void *priv)
{
struct wfx_sdio_priv *bus = priv;
sdio_release_host(bus->func);
}
static void wfx_sdio_irq_handler(struct sdio_func *func)
{
struct wfx_sdio_priv *bus = sdio_get_drvdata(func);
wfx_bh_request_rx(bus->core);
}
static irqreturn_t wfx_sdio_irq_handler_ext(int irq, void *priv)
{
struct wfx_sdio_priv *bus = priv;
sdio_claim_host(bus->func);
wfx_bh_request_rx(bus->core);
sdio_release_host(bus->func);
return IRQ_HANDLED;
}
static int wfx_sdio_irq_subscribe(void *priv)
{
struct wfx_sdio_priv *bus = priv;
u32 flags;
int ret;
u8 cccr;
if (!bus->of_irq) {
sdio_claim_host(bus->func);
ret = sdio_claim_irq(bus->func, wfx_sdio_irq_handler);
sdio_release_host(bus->func);
return ret;
}
flags = irq_get_trigger_type(bus->of_irq);
if (!flags)
flags = IRQF_TRIGGER_HIGH;
flags |= IRQF_ONESHOT;
ret = devm_request_threaded_irq(&bus->func->dev, bus->of_irq, NULL,
wfx_sdio_irq_handler_ext, flags, "wfx", bus);
if (ret)
return ret;
sdio_claim_host(bus->func);
cccr = sdio_f0_readb(bus->func, SDIO_CCCR_IENx, NULL);
cccr |= BIT(0);
cccr |= BIT(bus->func->num);
sdio_f0_writeb(bus->func, cccr, SDIO_CCCR_IENx, NULL);
sdio_release_host(bus->func);
return 0;
}
static int wfx_sdio_irq_unsubscribe(void *priv)
{
struct wfx_sdio_priv *bus = priv;
int ret;
if (bus->of_irq)
devm_free_irq(&bus->func->dev, bus->of_irq, bus);
sdio_claim_host(bus->func);
ret = sdio_release_irq(bus->func);
sdio_release_host(bus->func);
return ret;
}
static size_t wfx_sdio_align_size(void *priv, size_t size)
{
struct wfx_sdio_priv *bus = priv;
return sdio_align_size(bus->func, size);
}
static const struct wfx_hwbus_ops wfx_sdio_hwbus_ops = {
.copy_from_io = wfx_sdio_copy_from_io,
.copy_to_io = wfx_sdio_copy_to_io,
.irq_subscribe = wfx_sdio_irq_subscribe,
.irq_unsubscribe = wfx_sdio_irq_unsubscribe,
.lock = wfx_sdio_lock,
.unlock = wfx_sdio_unlock,
.align_size = wfx_sdio_align_size,
};
static const struct of_device_id wfx_sdio_of_match[] = {
{ .compatible = "silabs,wf200", .data = &pdata_wf200 },
{ .compatible = "silabs,brd4001a", .data = &pdata_brd4001a },
{ .compatible = "silabs,brd8022a", .data = &pdata_brd8022a },
{ .compatible = "silabs,brd8023a", .data = &pdata_brd8023a },
{ },
};
MODULE_DEVICE_TABLE(of, wfx_sdio_of_match);
static int wfx_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
{
const struct wfx_platform_data *pdata = of_device_get_match_data(&func->dev);
struct device_node *np = func->dev.of_node;
struct wfx_sdio_priv *bus;
int ret;
if (func->num != 1) {
dev_err(&func->dev, "SDIO function number is %d while it should always be 1 (unsupported chip?)\n",
func->num);
return -ENODEV;
}
if (!pdata) {
dev_warn(&func->dev, "no compatible device found in DT\n");
return -ENODEV;
}
bus = devm_kzalloc(&func->dev, sizeof(*bus), GFP_KERNEL);
if (!bus)
return -ENOMEM;
bus->func = func;
bus->of_irq = irq_of_parse_and_map(np, 0);
sdio_set_drvdata(func, bus);
sdio_claim_host(func);
ret = sdio_enable_func(func);
/* A 64-byte block size is more efficient than 512 bytes for frame sizes < 4k */
sdio_set_block_size(func, 64);
sdio_release_host(func);
if (ret)
return ret;
bus->core = wfx_init_common(&func->dev, pdata, &wfx_sdio_hwbus_ops, bus);
if (!bus->core) {
ret = -EIO;
goto sdio_release;
}
ret = wfx_probe(bus->core);
if (ret)
goto sdio_release;
return 0;
sdio_release:
sdio_claim_host(func);
sdio_disable_func(func);
sdio_release_host(func);
return ret;
}
static void wfx_sdio_remove(struct sdio_func *func)
{
struct wfx_sdio_priv *bus = sdio_get_drvdata(func);
wfx_release(bus->core);
sdio_claim_host(func);
sdio_disable_func(func);
sdio_release_host(func);
}
static const struct sdio_device_id wfx_sdio_ids[] = {
/* WF200 does not have official VID/PID */
{ SDIO_DEVICE(0x0000, 0x1000) },
{ },
};
MODULE_DEVICE_TABLE(sdio, wfx_sdio_ids);
struct sdio_driver wfx_sdio_driver = {
.name = "wfx-sdio",
.id_table = wfx_sdio_ids,
.probe = wfx_sdio_probe,
.remove = wfx_sdio_remove,
.drv = {
.owner = THIS_MODULE,
.of_match_table = wfx_sdio_of_match,
}
};
|
linux-master
|
drivers/net/wireless/silabs/wfx/bus_sdio.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Implementation of mac80211 API.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "sta.h"
#include "wfx.h"
#include "fwio.h"
#include "bh.h"
#include "key.h"
#include "scan.h"
#include "debug.h"
#include "hif_tx.h"
#include "hif_tx_mib.h"
#define HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES 2
u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates)
{
int i;
u32 ret = 0;
/* The device only supports 2GHz */
struct ieee80211_supported_band *sband = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ];
/* Walk the whole 32-bit mask so that rates the band does not declare are caught */
for (i = 0; i < BITS_PER_TYPE(rates); i++) {
if (rates & BIT(i)) {
if (i >= sband->n_bitrates)
dev_warn(wdev->dev, "unsupported basic rate\n");
else
ret |= BIT(sband->bitrates[i].hw_value);
}
}
return ret;
}
void wfx_cooling_timeout_work(struct work_struct *work)
{
struct wfx_dev *wdev = container_of(to_delayed_work(work), struct wfx_dev,
cooling_timeout_work);
wdev->chip_frozen = true;
wfx_tx_unlock(wdev);
}
void wfx_suspend_hot_dev(struct wfx_dev *wdev, enum sta_notify_cmd cmd)
{
if (cmd == STA_NOTIFY_AWAKE) {
/* Device recovered a normal temperature */
if (cancel_delayed_work(&wdev->cooling_timeout_work))
wfx_tx_unlock(wdev);
} else {
/* Device is too hot */
schedule_delayed_work(&wdev->cooling_timeout_work, 10 * HZ);
wfx_tx_lock(wdev);
}
}
static void wfx_filter_beacon(struct wfx_vif *wvif, bool filter_beacon)
{
static const struct wfx_hif_ie_table_entry filter_ies[] = {
{
.ie_id = WLAN_EID_VENDOR_SPECIFIC,
.has_changed = 1,
.no_longer = 1,
.has_appeared = 1,
.oui = { 0x50, 0x6F, 0x9A },
}, {
.ie_id = WLAN_EID_HT_OPERATION,
.has_changed = 1,
.no_longer = 1,
.has_appeared = 1,
}, {
.ie_id = WLAN_EID_ERP_INFO,
.has_changed = 1,
.no_longer = 1,
.has_appeared = 1,
}, {
.ie_id = WLAN_EID_CHANNEL_SWITCH,
.has_changed = 1,
.no_longer = 1,
.has_appeared = 1,
}
};
if (!filter_beacon) {
wfx_hif_beacon_filter_control(wvif, 0, 1);
} else {
wfx_hif_set_beacon_filter_table(wvif, ARRAY_SIZE(filter_ies), filter_ies);
wfx_hif_beacon_filter_control(wvif, HIF_BEACON_FILTER_ENABLE, 0);
}
}
void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
unsigned int *total_flags, u64 unused)
{
bool filter_bssid, filter_prbreq, filter_beacon;
struct ieee80211_vif *vif = NULL;
struct wfx_dev *wdev = hw->priv;
struct wfx_vif *wvif = NULL;
/* Notes:
* - Probe responses (FIF_BCN_PRBRESP_PROMISC) are never filtered
* - PS-Poll frames (FIF_PSPOLL) are never filtered
* - RTS, CTS and Ack (FIF_CONTROL) are always filtered
* - Broken frames (FIF_FCSFAIL and FIF_PLCPFAIL) are always filtered
* - Firmware does not (yet) allow forwarding unicast traffic sent to other stations
* (aka promiscuous mode)
*/
*total_flags &= FIF_BCN_PRBRESP_PROMISC | FIF_ALLMULTI | FIF_OTHER_BSS |
FIF_PROBE_REQ | FIF_PSPOLL;
mutex_lock(&wdev->conf_mutex);
while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
mutex_lock(&wvif->scan_lock);
/* Note: FIF_BCN_PRBRESP_PROMISC covers probe response and
* beacons from other BSS
*/
if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
filter_beacon = false;
else
filter_beacon = true;
wfx_filter_beacon(wvif, filter_beacon);
if (*total_flags & FIF_OTHER_BSS)
filter_bssid = false;
else
filter_bssid = true;
vif = wvif_to_vif(wvif);
/* In AP mode, the chip can reply to probe requests itself */
if (*total_flags & FIF_PROBE_REQ && vif->type == NL80211_IFTYPE_AP) {
dev_dbg(wdev->dev, "do not forward probe request in AP mode\n");
*total_flags &= ~FIF_PROBE_REQ;
}
if (*total_flags & FIF_PROBE_REQ)
filter_prbreq = false;
else
filter_prbreq = true;
wfx_hif_set_rx_filter(wvif, filter_bssid, filter_prbreq);
mutex_unlock(&wvif->scan_lock);
}
mutex_unlock(&wdev->conf_mutex);
}
static int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps)
{
struct ieee80211_channel *chan0 = NULL, *chan1 = NULL;
struct ieee80211_conf *conf = &wvif->wdev->hw->conf;
struct ieee80211_vif *vif = wvif_to_vif(wvif);
WARN(!vif->cfg.assoc && enable_ps,
"enable_ps is reliable only if associated");
if (wdev_to_wvif(wvif->wdev, 0)) {
struct wfx_vif *wvif_ch0 = wdev_to_wvif(wvif->wdev, 0);
struct ieee80211_vif *vif_ch0 = wvif_to_vif(wvif_ch0);
chan0 = vif_ch0->bss_conf.chandef.chan;
}
if (wdev_to_wvif(wvif->wdev, 1)) {
struct wfx_vif *wvif_ch1 = wdev_to_wvif(wvif->wdev, 1);
struct ieee80211_vif *vif_ch1 = wvif_to_vif(wvif_ch1);
chan1 = vif_ch1->bss_conf.chandef.chan;
}
if (chan0 && chan1 && vif->type != NL80211_IFTYPE_AP) {
if (chan0->hw_value == chan1->hw_value) {
/* It is useless to enable PS if channels are the same. */
if (enable_ps)
*enable_ps = false;
if (vif->cfg.assoc && vif->cfg.ps)
dev_info(wvif->wdev->dev, "ignoring requested PS mode");
return -1;
}
/* It is necessary to enable PS if channels are different. */
if (enable_ps)
*enable_ps = true;
if (wfx_api_older_than(wvif->wdev, 3, 2))
return 0;
else
return 30;
}
if (enable_ps)
*enable_ps = vif->cfg.ps;
if (vif->cfg.assoc && vif->cfg.ps)
return conf->dynamic_ps_timeout;
else
return -1;
}
int wfx_update_pm(struct wfx_vif *wvif)
{
struct ieee80211_vif *vif = wvif_to_vif(wvif);
int ps_timeout;
bool ps;
if (!vif->cfg.assoc)
return 0;
ps_timeout = wfx_get_ps_timeout(wvif, &ps);
if (!ps)
ps_timeout = 0;
WARN_ON(ps_timeout < 0);
if (wvif->uapsd_mask)
ps_timeout = 0;
if (!wait_for_completion_timeout(&wvif->set_pm_mode_complete, TU_TO_JIFFIES(512)))
dev_warn(wvif->wdev->dev, "timeout while waiting of set_pm_mode_complete\n");
return wfx_hif_set_pm(wvif, ps, ps_timeout);
}
int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct wfx_dev *wdev = hw->priv;
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
int old_uapsd = wvif->uapsd_mask;
WARN_ON(queue >= hw->queues);
mutex_lock(&wdev->conf_mutex);
assign_bit(queue, &wvif->uapsd_mask, params->uapsd);
wfx_hif_set_edca_queue_params(wvif, queue, params);
if (vif->type == NL80211_IFTYPE_STATION &&
old_uapsd != wvif->uapsd_mask) {
wfx_hif_set_uapsd_info(wvif, wvif->uapsd_mask);
wfx_update_pm(wvif);
}
mutex_unlock(&wdev->conf_mutex);
return 0;
}
int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct wfx_dev *wdev = hw->priv;
struct wfx_vif *wvif = NULL;
while ((wvif = wvif_iterate(wdev, wvif)) != NULL)
wfx_hif_rts_threshold(wvif, value);
return 0;
}
void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi)
{
/* RSSI: signed Q8.0, RCPI: unsigned Q7.1
* RSSI = RCPI / 2 - 110
*/
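/* Worked example: a raw RCPI of 140 gives 140 / 2 - 110 = -40 dBm */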
struct ieee80211_vif *vif = wvif_to_vif(wvif);
int rcpi_rssi;
int cqm_evt;
rcpi_rssi = raw_rcpi_rssi / 2 - 110;
if (rcpi_rssi <= vif->bss_conf.cqm_rssi_thold)
cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
else
cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
ieee80211_cqm_rssi_notify(vif, cqm_evt, rcpi_rssi, GFP_KERNEL);
}
static void wfx_beacon_loss_work(struct work_struct *work)
{
struct wfx_vif *wvif = container_of(to_delayed_work(work), struct wfx_vif,
beacon_loss_work);
struct ieee80211_vif *vif = wvif_to_vif(wvif);
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
ieee80211_beacon_loss(vif);
schedule_delayed_work(to_delayed_work(work), msecs_to_jiffies(bss_conf->beacon_int));
}
void wfx_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx)
{
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
wfx_hif_wep_default_key_id(wvif, idx);
}
void wfx_reset(struct wfx_vif *wvif)
{
struct wfx_dev *wdev = wvif->wdev;
wfx_tx_lock_flush(wdev);
wfx_hif_reset(wvif, false);
wfx_tx_policy_init(wvif);
if (wvif_count(wdev) <= 1)
wfx_hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
wfx_tx_unlock(wdev);
wvif->join_in_progress = false;
cancel_delayed_work_sync(&wvif->beacon_loss_work);
wvif = NULL;
while ((wvif = wvif_iterate(wdev, wvif)) != NULL)
wfx_update_pm(wvif);
}
int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *)&sta->drv_priv;
sta_priv->vif_id = wvif->id;
if (vif->type == NL80211_IFTYPE_STATION)
wfx_hif_set_mfp(wvif, sta->mfp, sta->mfp);
/* In station mode, the firmware interprets a new link-id as a TDLS peer */
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
return 0;
sta_priv->link_id = ffz(wvif->link_id_map);
wvif->link_id_map |= BIT(sta_priv->link_id);
WARN_ON(!sta_priv->link_id);
WARN_ON(sta_priv->link_id >= HIF_LINK_ID_MAX);
wfx_hif_map_link(wvif, false, sta->addr, sta_priv->link_id, sta->mfp);
return 0;
}
int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *)&sta->drv_priv;
/* See note in wfx_sta_add() */
if (!sta_priv->link_id)
return 0;
/* FIXME add a mutex? */
wfx_hif_map_link(wvif, true, sta->addr, sta_priv->link_id, false);
wvif->link_id_map &= ~BIT(sta_priv->link_id);
return 0;
}
static int wfx_upload_ap_templates(struct wfx_vif *wvif)
{
struct ieee80211_vif *vif = wvif_to_vif(wvif);
struct sk_buff *skb;
skb = ieee80211_beacon_get(wvif->wdev->hw, vif, 0);
if (!skb)
return -ENOMEM;
wfx_hif_set_template_frame(wvif, skb, HIF_TMPLT_BCN, API_RATE_INDEX_B_1MBPS);
dev_kfree_skb(skb);
skb = ieee80211_proberesp_get(wvif->wdev->hw, vif);
if (!skb)
return -ENOMEM;
wfx_hif_set_template_frame(wvif, skb, HIF_TMPLT_PRBRES, API_RATE_INDEX_B_1MBPS);
dev_kfree_skb(skb);
return 0;
}
static void wfx_set_mfp_ap(struct wfx_vif *wvif)
{
struct ieee80211_vif *vif = wvif_to_vif(wvif);
struct sk_buff *skb = ieee80211_beacon_get(wvif->wdev->hw, vif, 0);
const int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
const u16 *ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
skb->len - ieoffset);
const int pairwise_cipher_suite_count_offset = 8 / sizeof(u16);
const int pairwise_cipher_suite_size = 4 / sizeof(u16);
const int akm_suite_size = 4 / sizeof(u16);
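/* Layout walked below, in u16 units from the element ID (per IEEE 802.11): id/len +
* version + group cipher suite (4 u16 total), then a pairwise cipher suite count and
* list, then an AKM suite count and list, then the RSN capabilities field, where
* BIT(7) is MFPC (MFP capable) and BIT(6) is MFPR (MFP required).
*/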
if (ptr) {
ptr += pairwise_cipher_suite_count_offset;
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
return;
ptr += 1 + pairwise_cipher_suite_size * *ptr;
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
return;
ptr += 1 + akm_suite_size * *ptr;
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
return;
wfx_hif_set_mfp(wvif, *ptr & BIT(7), *ptr & BIT(6));
}
}
int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf)
{
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
struct wfx_dev *wdev = wvif->wdev;
int ret;
wvif = NULL;
while ((wvif = wvif_iterate(wdev, wvif)) != NULL)
wfx_update_pm(wvif);
wvif = (struct wfx_vif *)vif->drv_priv;
wfx_upload_ap_templates(wvif);
ret = wfx_hif_start(wvif, &vif->bss_conf, wvif->channel);
if (ret > 0)
return -EIO;
wfx_set_mfp_ap(wvif);
return ret;
}
void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf)
{
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
wfx_reset(wvif);
}
static void wfx_join(struct wfx_vif *wvif)
{
struct ieee80211_vif *vif = wvif_to_vif(wvif);
struct ieee80211_bss_conf *conf = &vif->bss_conf;
struct cfg80211_bss *bss = NULL;
u8 ssid[IEEE80211_MAX_SSID_LEN];
const u8 *ssid_ie = NULL;
int ssid_len = 0;
int ret;
wfx_tx_lock_flush(wvif->wdev);
bss = cfg80211_get_bss(wvif->wdev->hw->wiphy, wvif->channel, conf->bssid, NULL, 0,
IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
if (!bss && !vif->cfg.ibss_joined) {
wfx_tx_unlock(wvif->wdev);
return;
}
rcu_read_lock(); /* protect ssid_ie */
if (bss)
ssid_ie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
if (ssid_ie) {
ssid_len = ssid_ie[1];
if (ssid_len > IEEE80211_MAX_SSID_LEN)
ssid_len = IEEE80211_MAX_SSID_LEN;
memcpy(ssid, &ssid_ie[2], ssid_len);
}
rcu_read_unlock();
cfg80211_put_bss(wvif->wdev->hw->wiphy, bss);
wvif->join_in_progress = true;
ret = wfx_hif_join(wvif, conf, wvif->channel, ssid, ssid_len);
if (ret) {
ieee80211_connection_loss(vif);
wfx_reset(wvif);
} else {
/* Due to beacon filtering it is possible that the AP's beacon is not known to the
* mac80211 stack. Disable filtering temporarily to make sure the stack receives at
* least one.
*/
wfx_filter_beacon(wvif, false);
}
wfx_tx_unlock(wvif->wdev);
}
static void wfx_join_finalize(struct wfx_vif *wvif, struct ieee80211_bss_conf *info)
{
struct ieee80211_vif *vif = wvif_to_vif(wvif);
struct ieee80211_sta *sta = NULL;
int ampdu_density = 0;
bool greenfield = false;
rcu_read_lock(); /* protect sta */
if (info->bssid && !vif->cfg.ibss_joined)
sta = ieee80211_find_sta(vif, info->bssid);
if (sta && sta->deflink.ht_cap.ht_supported)
ampdu_density = sta->deflink.ht_cap.ampdu_density;
if (sta && sta->deflink.ht_cap.ht_supported &&
!(info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT))
greenfield = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
rcu_read_unlock();
wvif->join_in_progress = false;
wfx_hif_set_association_mode(wvif, ampdu_density, greenfield, info->use_short_preamble);
wfx_hif_keep_alive_period(wvif, 0);
/* beacon_loss_count is defined to 7 in net/mac80211/mlme.c. Let's use the same value. */
wfx_hif_set_bss_params(wvif, vif->cfg.aid, 7);
wfx_hif_set_beacon_wakeup_period(wvif, 1, 1);
wfx_update_pm(wvif);
}
int wfx_join_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
wfx_upload_ap_templates(wvif);
wfx_join(wvif);
return 0;
}
void wfx_leave_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
wfx_reset(wvif);
}
static void wfx_enable_beacon(struct wfx_vif *wvif, bool enable)
{
/* The driver has Content After DTIM Beacon in its queue and is waiting for a signal from
* the firmware. Since we are going to stop sending beacons, this signal will never happen.
* See also wfx_suspend_resume_mc()
*/
if (!enable && wfx_tx_queues_has_cab(wvif)) {
wvif->after_dtim_tx_allowed = true;
wfx_bh_request_tx(wvif->wdev);
}
wfx_hif_beacon_transmit(wvif, enable);
}
void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u64 changed)
{
struct wfx_dev *wdev = hw->priv;
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
int i;
mutex_lock(&wdev->conf_mutex);
if (changed & BSS_CHANGED_BASIC_RATES ||
changed & BSS_CHANGED_BEACON_INT ||
changed & BSS_CHANGED_BSSID) {
if (vif->type == NL80211_IFTYPE_STATION)
wfx_join(wvif);
}
if (changed & BSS_CHANGED_ASSOC) {
if (vif->cfg.assoc || vif->cfg.ibss_joined)
wfx_join_finalize(wvif, info);
else if (!vif->cfg.assoc && vif->type == NL80211_IFTYPE_STATION)
wfx_reset(wvif);
else
dev_warn(wdev->dev, "misunderstood change: ASSOC\n");
}
if (changed & BSS_CHANGED_BEACON_INFO) {
if (vif->type != NL80211_IFTYPE_STATION)
dev_warn(wdev->dev, "misunderstood change: BEACON_INFO\n");
wfx_hif_set_beacon_wakeup_period(wvif, info->dtim_period, info->dtim_period);
/* We temporarily forwarded beacons for the join process. This is no longer necessary. */
wfx_filter_beacon(wvif, true);
}
if (changed & BSS_CHANGED_ARP_FILTER) {
for (i = 0; i < HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES; i++) {
__be32 *arp_addr = &vif->cfg.arp_addr_list[i];
if (vif->cfg.arp_addr_cnt > HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES)
arp_addr = NULL;
if (i >= vif->cfg.arp_addr_cnt)
arp_addr = NULL;
wfx_hif_set_arp_ipv4_filter(wvif, i, arp_addr);
}
}
if (changed & BSS_CHANGED_AP_PROBE_RESP || changed & BSS_CHANGED_BEACON)
wfx_upload_ap_templates(wvif);
if (changed & BSS_CHANGED_BEACON_ENABLED)
wfx_enable_beacon(wvif, info->enable_beacon);
if (changed & BSS_CHANGED_KEEP_ALIVE)
wfx_hif_keep_alive_period(wvif,
info->max_idle_period * USEC_PER_TU / USEC_PER_MSEC);
if (changed & BSS_CHANGED_ERP_CTS_PROT)
wfx_hif_erp_use_protection(wvif, info->use_cts_prot);
if (changed & BSS_CHANGED_ERP_SLOT)
wfx_hif_slot_time(wvif, info->use_short_slot ? 9 : 20);
if (changed & BSS_CHANGED_CQM)
wfx_hif_set_rcpi_rssi_threshold(wvif, info->cqm_rssi_thold, info->cqm_rssi_hyst);
if (changed & BSS_CHANGED_TXPOWER)
wfx_hif_set_output_power(wvif, info->txpower);
if (changed & BSS_CHANGED_PS)
wfx_update_pm(wvif);
mutex_unlock(&wdev->conf_mutex);
}
static int wfx_update_tim(struct wfx_vif *wvif)
{
struct ieee80211_vif *vif = wvif_to_vif(wvif);
struct sk_buff *skb;
u16 tim_offset, tim_length;
u8 *tim_ptr;
skb = ieee80211_beacon_get_tim(wvif->wdev->hw, vif, &tim_offset,
&tim_length, 0);
if (!skb)
return -ENOENT;
tim_ptr = skb->data + tim_offset;
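/* TIM element layout (IEEE 802.11): [0] element ID, [1] length, [2] DTIM count,
* [3] DTIM period, [4] bitmap control (bit 0 is the multicast/AID0 bit),
* [5..] partial virtual bitmap.
*/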
if (tim_offset && tim_length >= 6) {
/* Firmware handles DTIM counter internally */
tim_ptr[2] = 0;
/* Set/reset aid0 bit */
if (wfx_tx_queues_has_cab(wvif))
tim_ptr[4] |= 1;
else
tim_ptr[4] &= ~1;
}
wfx_hif_update_ie_beacon(wvif, tim_ptr, tim_length);
dev_kfree_skb(skb);
return 0;
}
static void wfx_update_tim_work(struct work_struct *work)
{
struct wfx_vif *wvif = container_of(work, struct wfx_vif, update_tim_work);
wfx_update_tim(wvif);
}
int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
struct wfx_dev *wdev = hw->priv;
struct wfx_sta_priv *sta_dev = (struct wfx_sta_priv *)&sta->drv_priv;
struct wfx_vif *wvif = wdev_to_wvif(wdev, sta_dev->vif_id);
if (!wvif) {
dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__);
return -EIO;
}
schedule_work(&wvif->update_tim_work);
return 0;
}
void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd notify_cmd)
{
struct wfx_vif *wvif_it;
if (notify_cmd != STA_NOTIFY_AWAKE)
return;
/* Device won't be able to honor CAB if a scan is in progress on any interface. Prefer to
* skip this DTIM and wait for the next one.
*/
wvif_it = NULL;
while ((wvif_it = wvif_iterate(wvif->wdev, wvif_it)) != NULL)
if (mutex_is_locked(&wvif_it->scan_lock))
return;
if (!wfx_tx_queues_has_cab(wvif) || wvif->after_dtim_tx_allowed)
dev_warn(wvif->wdev->dev, "incorrect sequence (%d CAB in queue)",
wfx_tx_queues_has_cab(wvif));
wvif->after_dtim_tx_allowed = true;
wfx_bh_request_tx(wvif->wdev);
}
int wfx_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
{
/* Aggregation is implemented fully in firmware */
switch (params->action) {
case IEEE80211_AMPDU_RX_START:
case IEEE80211_AMPDU_RX_STOP:
/* Just acknowledge it to enable frame re-ordering */
return 0;
default:
/* Leave the firmware doing its business for tx aggregation */
return -EOPNOTSUPP;
}
}
int wfx_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf)
{
return 0;
}
void wfx_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf)
{
}
void wfx_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf, u32 changed)
{
}
int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *conf)
{
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
struct ieee80211_channel *ch = conf->def.chan;
WARN(wvif->channel, "channel overwrite");
wvif->channel = ch;
return 0;
}
void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *conf)
{
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
struct ieee80211_channel *ch = conf->def.chan;
WARN(wvif->channel != ch, "channel mismatch");
wvif->channel = NULL;
}
int wfx_config(struct ieee80211_hw *hw, u32 changed)
{
return 0;
}
int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
int i;
struct wfx_dev *wdev = hw->priv;
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
IEEE80211_VIF_SUPPORTS_UAPSD |
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
mutex_lock(&wdev->conf_mutex);
switch (vif->type) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_AP:
break;
default:
mutex_unlock(&wdev->conf_mutex);
return -EOPNOTSUPP;
}
wvif->wdev = wdev;
wvif->link_id_map = 1; /* link-id 0 is reserved for multicast */
INIT_WORK(&wvif->update_tim_work, wfx_update_tim_work);
INIT_DELAYED_WORK(&wvif->beacon_loss_work, wfx_beacon_loss_work);
init_completion(&wvif->set_pm_mode_complete);
complete(&wvif->set_pm_mode_complete);
INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work);
mutex_init(&wvif->scan_lock);
init_completion(&wvif->scan_complete);
INIT_WORK(&wvif->scan_work, wfx_hw_scan_work);
wfx_tx_queues_init(wvif);
wfx_tx_policy_init(wvif);
for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) {
if (!wdev->vif[i]) {
wdev->vif[i] = vif;
wvif->id = i;
break;
}
}
WARN(i == ARRAY_SIZE(wdev->vif), "trying to instantiate more vifs than supported");
wfx_hif_set_macaddr(wvif, vif->addr);
mutex_unlock(&wdev->conf_mutex);
wvif = NULL;
while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
/* Combo mode does not support Block Acks. We can re-enable them */
if (wvif_count(wdev) == 1)
wfx_hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
else
wfx_hif_set_block_ack_policy(wvif, 0x00, 0x00);
}
return 0;
}
void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct wfx_dev *wdev = hw->priv;
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
wait_for_completion_timeout(&wvif->set_pm_mode_complete, msecs_to_jiffies(300));
wfx_tx_queues_check_empty(wvif);
mutex_lock(&wdev->conf_mutex);
WARN(wvif->link_id_map != 1, "corrupted state");
wfx_hif_reset(wvif, false);
wfx_hif_set_macaddr(wvif, NULL);
wfx_tx_policy_init(wvif);
cancel_delayed_work_sync(&wvif->beacon_loss_work);
wdev->vif[wvif->id] = NULL;
mutex_unlock(&wdev->conf_mutex);
wvif = NULL;
while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
/* Combo mode does not support Block Acks. We can re-enable them */
if (wvif_count(wdev) == 1)
wfx_hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
else
wfx_hif_set_block_ack_policy(wvif, 0x00, 0x00);
}
}
int wfx_start(struct ieee80211_hw *hw)
{
return 0;
}
void wfx_stop(struct ieee80211_hw *hw)
{
struct wfx_dev *wdev = hw->priv;
WARN_ON(!skb_queue_empty_lockless(&wdev->tx_pending));
}
|
linux-master
|
drivers/net/wireless/silabs/wfx/sta.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Key management related functions.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "key.h"
#include "wfx.h"
#include "hif_tx_mib.h"
static int wfx_alloc_key(struct wfx_dev *wdev)
{
int idx;
idx = ffs(~wdev->key_map) - 1;
if (idx < 0 || idx >= MAX_KEY_ENTRIES)
return -1;
wdev->key_map |= BIT(idx);
return idx;
}
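/* Example: with key_map == 0b0111, ffs(~key_map) - 1 == 3, so entry 3 is allocated
* and key_map becomes 0b1111.
*/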
static void wfx_free_key(struct wfx_dev *wdev, int idx)
{
WARN(!(wdev->key_map & BIT(idx)), "inconsistent key allocation");
wdev->key_map &= ~BIT(idx);
}
static u8 fill_wep_pair(struct wfx_hif_wep_pairwise_key *msg,
struct ieee80211_key_conf *key, u8 *peer_addr)
{
WARN(key->keylen > sizeof(msg->key_data), "inconsistent data");
msg->key_length = key->keylen;
memcpy(msg->key_data, key->key, key->keylen);
ether_addr_copy(msg->peer_address, peer_addr);
return HIF_KEY_TYPE_WEP_PAIRWISE;
}
static u8 fill_wep_group(struct wfx_hif_wep_group_key *msg,
struct ieee80211_key_conf *key)
{
WARN(key->keylen > sizeof(msg->key_data), "inconsistent data");
msg->key_id = key->keyidx;
msg->key_length = key->keylen;
memcpy(msg->key_data, key->key, key->keylen);
return HIF_KEY_TYPE_WEP_DEFAULT;
}
static u8 fill_tkip_pair(struct wfx_hif_tkip_pairwise_key *msg,
struct ieee80211_key_conf *key, u8 *peer_addr)
{
u8 *keybuf = key->key;
WARN(key->keylen != sizeof(msg->tkip_key_data) + sizeof(msg->tx_mic_key) +
sizeof(msg->rx_mic_key), "inconsistent data");
memcpy(msg->tkip_key_data, keybuf, sizeof(msg->tkip_key_data));
keybuf += sizeof(msg->tkip_key_data);
memcpy(msg->tx_mic_key, keybuf, sizeof(msg->tx_mic_key));
keybuf += sizeof(msg->tx_mic_key);
memcpy(msg->rx_mic_key, keybuf, sizeof(msg->rx_mic_key));
ether_addr_copy(msg->peer_address, peer_addr);
return HIF_KEY_TYPE_TKIP_PAIRWISE;
}
static u8 fill_tkip_group(struct wfx_hif_tkip_group_key *msg, struct ieee80211_key_conf *key,
struct ieee80211_key_seq *seq, enum nl80211_iftype iftype)
{
u8 *keybuf = key->key;
WARN(key->keylen != sizeof(msg->tkip_key_data) + 2 * sizeof(msg->rx_mic_key),
"inconsistent data");
msg->key_id = key->keyidx;
memcpy(msg->rx_sequence_counter, &seq->tkip.iv16, sizeof(seq->tkip.iv16));
memcpy(msg->rx_sequence_counter + sizeof(u16), &seq->tkip.iv32, sizeof(seq->tkip.iv32));
memcpy(msg->tkip_key_data, keybuf, sizeof(msg->tkip_key_data));
keybuf += sizeof(msg->tkip_key_data);
if (iftype == NL80211_IFTYPE_AP)
/* Use Tx MIC Key */
memcpy(msg->rx_mic_key, keybuf + 0, sizeof(msg->rx_mic_key));
else
/* Use Rx MIC Key */
memcpy(msg->rx_mic_key, keybuf + 8, sizeof(msg->rx_mic_key));
return HIF_KEY_TYPE_TKIP_GROUP;
}
static u8 fill_ccmp_pair(struct wfx_hif_aes_pairwise_key *msg,
struct ieee80211_key_conf *key, u8 *peer_addr)
{
WARN(key->keylen != sizeof(msg->aes_key_data), "inconsistent data");
ether_addr_copy(msg->peer_address, peer_addr);
memcpy(msg->aes_key_data, key->key, key->keylen);
return HIF_KEY_TYPE_AES_PAIRWISE;
}
static u8 fill_ccmp_group(struct wfx_hif_aes_group_key *msg,
struct ieee80211_key_conf *key, struct ieee80211_key_seq *seq)
{
WARN(key->keylen != sizeof(msg->aes_key_data), "inconsistent data");
memcpy(msg->aes_key_data, key->key, key->keylen);
memcpy(msg->rx_sequence_counter, seq->ccmp.pn, sizeof(seq->ccmp.pn));
memreverse(msg->rx_sequence_counter, sizeof(seq->ccmp.pn));
msg->key_id = key->keyidx;
return HIF_KEY_TYPE_AES_GROUP;
}
static u8 fill_sms4_pair(struct wfx_hif_wapi_pairwise_key *msg,
struct ieee80211_key_conf *key, u8 *peer_addr)
{
u8 *keybuf = key->key;
WARN(key->keylen != sizeof(msg->wapi_key_data) + sizeof(msg->mic_key_data),
"inconsistent data");
ether_addr_copy(msg->peer_address, peer_addr);
memcpy(msg->wapi_key_data, keybuf, sizeof(msg->wapi_key_data));
keybuf += sizeof(msg->wapi_key_data);
memcpy(msg->mic_key_data, keybuf, sizeof(msg->mic_key_data));
msg->key_id = key->keyidx;
return HIF_KEY_TYPE_WAPI_PAIRWISE;
}
static u8 fill_sms4_group(struct wfx_hif_wapi_group_key *msg,
struct ieee80211_key_conf *key)
{
u8 *keybuf = key->key;
WARN(key->keylen != sizeof(msg->wapi_key_data) + sizeof(msg->mic_key_data),
"inconsistent data");
memcpy(msg->wapi_key_data, keybuf, sizeof(msg->wapi_key_data));
keybuf += sizeof(msg->wapi_key_data);
memcpy(msg->mic_key_data, keybuf, sizeof(msg->mic_key_data));
msg->key_id = key->keyidx;
return HIF_KEY_TYPE_WAPI_GROUP;
}
static u8 fill_aes_cmac_group(struct wfx_hif_igtk_group_key *msg,
struct ieee80211_key_conf *key, struct ieee80211_key_seq *seq)
{
WARN(key->keylen != sizeof(msg->igtk_key_data), "inconsistent data");
memcpy(msg->igtk_key_data, key->key, key->keylen);
memcpy(msg->ipn, seq->aes_cmac.pn, sizeof(seq->aes_cmac.pn));
memreverse(msg->ipn, sizeof(seq->aes_cmac.pn));
msg->key_id = key->keyidx;
return HIF_KEY_TYPE_IGTK_GROUP;
}
static int wfx_add_key(struct wfx_vif *wvif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
int ret;
struct wfx_hif_req_add_key k = { };
struct ieee80211_key_seq seq;
struct wfx_dev *wdev = wvif->wdev;
int idx = wfx_alloc_key(wvif->wdev);
bool pairwise = key->flags & IEEE80211_KEY_FLAG_PAIRWISE;
struct ieee80211_vif *vif = wvif_to_vif(wvif);
WARN(key->flags & IEEE80211_KEY_FLAG_PAIRWISE && !sta, "inconsistent data");
ieee80211_get_key_rx_seq(key, 0, &seq);
if (idx < 0)
return -EINVAL;
k.int_id = wvif->id;
k.entry_index = idx;
if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
key->cipher == WLAN_CIPHER_SUITE_WEP104) {
if (pairwise)
k.type = fill_wep_pair(&k.key.wep_pairwise_key, key, sta->addr);
else
k.type = fill_wep_group(&k.key.wep_group_key, key);
} else if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
if (pairwise)
k.type = fill_tkip_pair(&k.key.tkip_pairwise_key, key, sta->addr);
else
k.type = fill_tkip_group(&k.key.tkip_group_key, key, &seq,
vif->type);
} else if (key->cipher == WLAN_CIPHER_SUITE_CCMP) {
if (pairwise)
k.type = fill_ccmp_pair(&k.key.aes_pairwise_key, key, sta->addr);
else
k.type = fill_ccmp_group(&k.key.aes_group_key, key, &seq);
} else if (key->cipher == WLAN_CIPHER_SUITE_SMS4) {
if (pairwise)
k.type = fill_sms4_pair(&k.key.wapi_pairwise_key, key, sta->addr);
else
k.type = fill_sms4_group(&k.key.wapi_group_key, key);
} else if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
k.type = fill_aes_cmac_group(&k.key.igtk_group_key, key, &seq);
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
} else {
dev_warn(wdev->dev, "unsupported key type %d\n", key->cipher);
wfx_free_key(wdev, idx);
return -EOPNOTSUPP;
}
ret = wfx_hif_add_key(wdev, &k);
if (ret) {
wfx_free_key(wdev, idx);
return -EOPNOTSUPP;
}
key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE | IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
key->hw_key_idx = idx;
return 0;
}
static int wfx_remove_key(struct wfx_vif *wvif, struct ieee80211_key_conf *key)
{
WARN(key->hw_key_idx >= MAX_KEY_ENTRIES, "corrupted hw_key_idx");
wfx_free_key(wvif->wdev, key->hw_key_idx);
return wfx_hif_remove_key(wvif->wdev, key->hw_key_idx);
}
int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct ieee80211_key_conf *key)
{
int ret = -EOPNOTSUPP;
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
mutex_lock(&wvif->wdev->conf_mutex);
if (cmd == SET_KEY)
ret = wfx_add_key(wvif, sta, key);
if (cmd == DISABLE_KEY)
ret = wfx_remove_key(wvif, key);
mutex_unlock(&wvif->wdev->conf_mutex);
return ret;
}
|
linux-master
|
drivers/net/wireless/silabs/wfx/key.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Handling of the chip-to-host events (aka indications) of the hardware API.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include "hif_rx.h"
#include "wfx.h"
#include "scan.h"
#include "bh.h"
#include "sta.h"
#include "data_rx.h"
#include "hif_api_cmd.h"
static int wfx_hif_generic_confirm(struct wfx_dev *wdev,
const struct wfx_hif_msg *hif, const void *buf)
{
/* All confirm messages start with status */
int status = le32_to_cpup((__le32 *)buf);
int cmd = hif->id;
int len = le16_to_cpu(hif->len) - 4; /* drop header */
WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error");
if (!wdev->hif_cmd.buf_send) {
dev_warn(wdev->dev, "unexpected confirmation: 0x%.2x\n", cmd);
return -EINVAL;
}
if (cmd != wdev->hif_cmd.buf_send->id) {
dev_warn(wdev->dev, "chip response mismatch request: 0x%.2x vs 0x%.2x\n",
cmd, wdev->hif_cmd.buf_send->id);
return -EINVAL;
}
if (wdev->hif_cmd.buf_recv) {
if (wdev->hif_cmd.len_recv >= len && len > 0)
memcpy(wdev->hif_cmd.buf_recv, buf, len);
else
status = -EIO;
}
wdev->hif_cmd.ret = status;
complete(&wdev->hif_cmd.done);
return status;
}
static int wfx_hif_tx_confirm(struct wfx_dev *wdev,
const struct wfx_hif_msg *hif, const void *buf)
{
const struct wfx_hif_cnf_tx *body = buf;
wfx_tx_confirm_cb(wdev, body);
return 0;
}
static int wfx_hif_multi_tx_confirm(struct wfx_dev *wdev,
const struct wfx_hif_msg *hif, const void *buf)
{
const struct wfx_hif_cnf_multi_transmit *body = buf;
int i;
WARN(body->num_tx_confs <= 0, "corrupted message");
for (i = 0; i < body->num_tx_confs; i++)
wfx_tx_confirm_cb(wdev, &body->tx_conf_payload[i]);
return 0;
}
static int wfx_hif_startup_indication(struct wfx_dev *wdev,
const struct wfx_hif_msg *hif, const void *buf)
{
const struct wfx_hif_ind_startup *body = buf;
if (body->status || body->firmware_type > 4) {
dev_err(wdev->dev, "received invalid startup indication");
return -EINVAL;
}
memcpy(&wdev->hw_caps, body, sizeof(struct wfx_hif_ind_startup));
complete(&wdev->firmware_ready);
return 0;
}
static int wfx_hif_wakeup_indication(struct wfx_dev *wdev,
const struct wfx_hif_msg *hif, const void *buf)
{
if (!wdev->pdata.gpio_wakeup || gpiod_get_value(wdev->pdata.gpio_wakeup) == 0) {
dev_warn(wdev->dev, "unexpected wake-up indication\n");
return -EIO;
}
return 0;
}
static int wfx_hif_receive_indication(struct wfx_dev *wdev, const struct wfx_hif_msg *hif,
const void *buf, struct sk_buff *skb)
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
const struct wfx_hif_ind_rx *body = buf;
if (!wvif) {
dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__);
return -EIO;
}
skb_pull(skb, sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_ind_rx));
wfx_rx_cb(wvif, body, skb);
return 0;
}
static int wfx_hif_event_indication(struct wfx_dev *wdev,
const struct wfx_hif_msg *hif, const void *buf)
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
const struct wfx_hif_ind_event *body = buf;
int type = le32_to_cpu(body->event_id);
if (!wvif) {
dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__);
return -EIO;
}
switch (type) {
case HIF_EVENT_IND_RCPI_RSSI:
wfx_event_report_rssi(wvif, body->event_data.rcpi_rssi);
break;
case HIF_EVENT_IND_BSSLOST:
schedule_delayed_work(&wvif->beacon_loss_work, 0);
break;
case HIF_EVENT_IND_BSSREGAINED:
cancel_delayed_work(&wvif->beacon_loss_work);
dev_dbg(wdev->dev, "ignore BSSREGAINED indication\n");
break;
case HIF_EVENT_IND_PS_MODE_ERROR:
dev_warn(wdev->dev, "error while processing power save request: %d\n",
le32_to_cpu(body->event_data.ps_mode_error));
break;
default:
dev_warn(wdev->dev, "unhandled event indication: %.2x\n", type);
break;
}
return 0;
}
static int wfx_hif_pm_mode_complete_indication(struct wfx_dev *wdev,
const struct wfx_hif_msg *hif, const void *buf)
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
if (!wvif) {
dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__);
return -EIO;
}
complete(&wvif->set_pm_mode_complete);
return 0;
}
static int wfx_hif_scan_complete_indication(struct wfx_dev *wdev,
const struct wfx_hif_msg *hif, const void *buf)
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
const struct wfx_hif_ind_scan_cmpl *body = buf;
if (!wvif) {
dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__);
return -EIO;
}
wfx_scan_complete(wvif, body->num_channels_completed);
return 0;
}
static int wfx_hif_join_complete_indication(struct wfx_dev *wdev,
const struct wfx_hif_msg *hif, const void *buf)
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
if (!wvif) {
dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__);
return -EIO;
}
dev_warn(wdev->dev, "unattended JoinCompleteInd\n");
return 0;
}
static int wfx_hif_suspend_resume_indication(struct wfx_dev *wdev,
const struct wfx_hif_msg *hif, const void *buf)
{
const struct wfx_hif_ind_suspend_resume_tx *body = buf;
struct wfx_vif *wvif;
if (body->bc_mc_only) {
wvif = wdev_to_wvif(wdev, hif->interface);
if (!wvif) {
dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__);
return -EIO;
}
if (body->resume)
wfx_suspend_resume_mc(wvif, STA_NOTIFY_AWAKE);
else
wfx_suspend_resume_mc(wvif, STA_NOTIFY_SLEEP);
} else {
WARN(body->peer_sta_set, "misunderstood indication");
WARN(hif->interface != 2, "misunderstood indication");
if (body->resume)
wfx_suspend_hot_dev(wdev, STA_NOTIFY_AWAKE);
else
wfx_suspend_hot_dev(wdev, STA_NOTIFY_SLEEP);
}
return 0;
}
static int wfx_hif_generic_indication(struct wfx_dev *wdev,
const struct wfx_hif_msg *hif, const void *buf)
{
const struct wfx_hif_ind_generic *body = buf;
int type = le32_to_cpu(body->type);
switch (type) {
case HIF_GENERIC_INDICATION_TYPE_RAW:
return 0;
case HIF_GENERIC_INDICATION_TYPE_STRING:
dev_info(wdev->dev, "firmware says: %s\n", (char *)&body->data);
return 0;
case HIF_GENERIC_INDICATION_TYPE_RX_STATS:
mutex_lock(&wdev->rx_stats_lock);
/* Older firmware sends a generic indication alongside RxStats */
if (!wfx_api_older_than(wdev, 1, 4))
dev_info(wdev->dev, "Rx test ongoing. Temperature: %d degrees C\n",
body->data.rx_stats.current_temp);
memcpy(&wdev->rx_stats, &body->data.rx_stats, sizeof(wdev->rx_stats));
mutex_unlock(&wdev->rx_stats_lock);
return 0;
case HIF_GENERIC_INDICATION_TYPE_TX_POWER_LOOP_INFO:
mutex_lock(&wdev->tx_power_loop_info_lock);
memcpy(&wdev->tx_power_loop_info, &body->data.tx_power_loop_info,
sizeof(wdev->tx_power_loop_info));
mutex_unlock(&wdev->tx_power_loop_info_lock);
return 0;
default:
dev_err(wdev->dev, "generic_indication: unknown indication type: %#.8x\n", type);
return -EIO;
}
}
static const struct {
int val;
const char *str;
bool has_param;
} hif_errors[] = {
{ HIF_ERROR_FIRMWARE_ROLLBACK,
"rollback status" },
{ HIF_ERROR_FIRMWARE_DEBUG_ENABLED,
"debug feature enabled" },
{ HIF_ERROR_PDS_PAYLOAD,
"PDS version is not supported" },
{ HIF_ERROR_PDS_TESTFEATURE,
"PDS ask for an unknown test mode" },
{ HIF_ERROR_OOR_VOLTAGE,
"out-of-range power supply voltage", true },
{ HIF_ERROR_OOR_TEMPERATURE,
"out-of-range temperature", true },
{ HIF_ERROR_SLK_REQ_DURING_KEY_EXCHANGE,
"secure link does not expect request during key exchange" },
{ HIF_ERROR_SLK_SESSION_KEY,
"secure link session key is invalid" },
{ HIF_ERROR_SLK_OVERFLOW,
"secure link overflow" },
{ HIF_ERROR_SLK_WRONG_ENCRYPTION_STATE,
"secure link messages list does not match message encryption" },
{ HIF_ERROR_SLK_UNCONFIGURED,
"secure link not yet configured" },
{ HIF_ERROR_HIF_BUS_FREQUENCY_TOO_LOW,
"bus clock is too slow (<1kHz)" },
{ HIF_ERROR_HIF_RX_DATA_TOO_LARGE,
"HIF message too large" },
/* Following errors only exists in old firmware versions: */
{ HIF_ERROR_HIF_TX_QUEUE_FULL,
"HIF messages queue is full" },
{ HIF_ERROR_HIF_BUS,
"HIF bus" },
{ HIF_ERROR_SLK_MULTI_TX_UNSUPPORTED,
"secure link does not support multi-tx confirmations" },
{ HIF_ERROR_SLK_OUTDATED_SESSION_KEY,
"secure link session key is outdated" },
{ HIF_ERROR_SLK_DECRYPTION,
"secure link params (nonce or tag) mismatch" },
};
static int wfx_hif_error_indication(struct wfx_dev *wdev,
const struct wfx_hif_msg *hif, const void *buf)
{
const struct wfx_hif_ind_error *body = buf;
int type = le32_to_cpu(body->type);
int param = (s8)body->data[0];
int i;
for (i = 0; i < ARRAY_SIZE(hif_errors); i++)
if (type == hif_errors[i].val)
break;
        if (i == ARRAY_SIZE(hif_errors))
                dev_err(wdev->dev, "asynchronous error: unknown: %08x\n", type);
        else if (hif_errors[i].has_param)
                dev_err(wdev->dev, "asynchronous error: %s: %d\n",
                        hif_errors[i].str, param);
        else
                dev_err(wdev->dev, "asynchronous error: %s\n", hif_errors[i].str);
print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET,
16, 1, hif, le16_to_cpu(hif->len), false);
wdev->chip_frozen = true;
return 0;
}
static int wfx_hif_exception_indication(struct wfx_dev *wdev,
const struct wfx_hif_msg *hif, const void *buf)
{
const struct wfx_hif_ind_exception *body = buf;
int type = le32_to_cpu(body->type);
if (type == 4)
dev_err(wdev->dev, "firmware assert %d\n", le32_to_cpup((__le32 *)body->data));
else
dev_err(wdev->dev, "firmware exception\n");
print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET,
16, 1, hif, le16_to_cpu(hif->len), false);
wdev->chip_frozen = true;
return -1;
}
static const struct {
int msg_id;
int (*handler)(struct wfx_dev *wdev, const struct wfx_hif_msg *hif, const void *buf);
} hif_handlers[] = {
/* Confirmations */
{ HIF_CNF_ID_TX, wfx_hif_tx_confirm },
{ HIF_CNF_ID_MULTI_TRANSMIT, wfx_hif_multi_tx_confirm },
/* Indications */
{ HIF_IND_ID_STARTUP, wfx_hif_startup_indication },
{ HIF_IND_ID_WAKEUP, wfx_hif_wakeup_indication },
{ HIF_IND_ID_JOIN_COMPLETE, wfx_hif_join_complete_indication },
{ HIF_IND_ID_SET_PM_MODE_CMPL, wfx_hif_pm_mode_complete_indication },
{ HIF_IND_ID_SCAN_CMPL, wfx_hif_scan_complete_indication },
{ HIF_IND_ID_SUSPEND_RESUME_TX, wfx_hif_suspend_resume_indication },
{ HIF_IND_ID_EVENT, wfx_hif_event_indication },
{ HIF_IND_ID_GENERIC, wfx_hif_generic_indication },
{ HIF_IND_ID_ERROR, wfx_hif_error_indication },
{ HIF_IND_ID_EXCEPTION, wfx_hif_exception_indication },
/* FIXME: allocate skb_p from wfx_hif_receive_indication and make it generic */
//{ HIF_IND_ID_RX, wfx_hif_receive_indication },
};
void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb)
{
int i;
const struct wfx_hif_msg *hif = (const struct wfx_hif_msg *)skb->data;
int hif_id = hif->id;
if (hif_id == HIF_IND_ID_RX) {
/* wfx_hif_receive_indication takes care of the skb lifetime */
wfx_hif_receive_indication(wdev, hif, hif->body, skb);
return;
}
/* Note: mutex_is_locked() causes an implicit memory barrier that protects buf_send */
if (mutex_is_locked(&wdev->hif_cmd.lock) &&
wdev->hif_cmd.buf_send && wdev->hif_cmd.buf_send->id == hif_id) {
wfx_hif_generic_confirm(wdev, hif, hif->body);
goto free;
}
for (i = 0; i < ARRAY_SIZE(hif_handlers); i++) {
if (hif_handlers[i].msg_id == hif_id) {
if (hif_handlers[i].handler)
hif_handlers[i].handler(wdev, hif, hif->body);
goto free;
}
}
if (hif_id & HIF_ID_IS_INDICATION)
dev_err(wdev->dev, "unsupported HIF indication: ID %02x\n", hif_id);
else
dev_err(wdev->dev, "unexpected HIF confirmation: ID %02x\n", hif_id);
free:
dev_kfree_skb(skb);
}
|
linux-master
|
drivers/net/wireless/silabs/wfx/hif_rx.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Implementation of the host-to-chip MIBs of the hardware API.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
* Copyright (C) 2010, ST-Ericsson SA
*/
#include <linux/etherdevice.h>
#include "wfx.h"
#include "hif_tx.h"
#include "hif_tx_mib.h"
#include "hif_api_mib.h"
int wfx_hif_set_output_power(struct wfx_vif *wvif, int val)
{
struct wfx_hif_mib_current_tx_power_level arg = {
.power_level = cpu_to_le32(val * 10),
};
return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_CURRENT_TX_POWER_LEVEL,
&arg, sizeof(arg));
}
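/* Worked example for the x10 scaling above, assuming the MIB expects the
 * power level in tenths of dBm: wfx_hif_set_output_power(wvif, 20) requests
 * 20 dBm and sends power_level = 200.
 */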
int wfx_hif_set_beacon_wakeup_period(struct wfx_vif *wvif,
unsigned int dtim_interval, unsigned int listen_interval)
{
struct wfx_hif_mib_beacon_wake_up_period arg = {
.wakeup_period_min = dtim_interval,
.receive_dtim = 0,
.wakeup_period_max = listen_interval,
};
if (dtim_interval > 0xFF || listen_interval > 0xFFFF)
return -EINVAL;
return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BEACON_WAKEUP_PERIOD,
&arg, sizeof(arg));
}
int wfx_hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif, int rssi_thold, int rssi_hyst)
{
struct wfx_hif_mib_rcpi_rssi_threshold arg = {
.rolling_average_count = 8,
.detection = 1,
};
if (!rssi_thold && !rssi_hyst) {
arg.upperthresh = 1;
arg.lowerthresh = 1;
} else {
arg.upper_threshold = rssi_thold + rssi_hyst;
arg.upper_threshold = (arg.upper_threshold + 110) * 2;
arg.lower_threshold = rssi_thold;
arg.lower_threshold = (arg.lower_threshold + 110) * 2;
}
return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_RCPI_RSSI_THRESHOLD,
&arg, sizeof(arg));
}
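/* Worked example of the (x + 110) * 2 encoding above (the RCPI scale defined
 * by IEEE 802.11, valid range 0..220): a threshold of -60 dBm with a 4 dB
 * hysteresis yields lower_threshold = (-60 + 110) * 2 = 100 and
 * upper_threshold = (-56 + 110) * 2 = 108.
 */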
int wfx_hif_get_counters_table(struct wfx_dev *wdev, int vif_id,
struct wfx_hif_mib_extended_count_table *arg)
{
if (wfx_api_older_than(wdev, 1, 3)) {
/* extended_count_table is wider than count_table */
memset(arg, 0xFF, sizeof(*arg));
return wfx_hif_read_mib(wdev, vif_id, HIF_MIB_ID_COUNTERS_TABLE,
arg, sizeof(struct wfx_hif_mib_count_table));
} else {
return wfx_hif_read_mib(wdev, vif_id, HIF_MIB_ID_EXTENDED_COUNTERS_TABLE,
arg, sizeof(struct wfx_hif_mib_extended_count_table));
}
}
int wfx_hif_set_macaddr(struct wfx_vif *wvif, u8 *mac)
{
struct wfx_hif_mib_mac_address arg = { };
if (mac)
ether_addr_copy(arg.mac_addr, mac);
return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_MAC_ADDRESS,
&arg, sizeof(arg));
}
int wfx_hif_set_rx_filter(struct wfx_vif *wvif, bool filter_bssid, bool filter_prbreq)
{
struct wfx_hif_mib_rx_filter arg = { };
if (filter_bssid)
arg.bssid_filter = 1;
if (!filter_prbreq)
arg.fwd_probe_req = 1;
return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_RX_FILTER, &arg, sizeof(arg));
}
int wfx_hif_set_beacon_filter_table(struct wfx_vif *wvif, int tbl_len,
const struct wfx_hif_ie_table_entry *tbl)
{
int ret;
struct wfx_hif_mib_bcn_filter_table *arg;
int buf_len = struct_size(arg, ie_table, tbl_len);
arg = kzalloc(buf_len, GFP_KERNEL);
if (!arg)
return -ENOMEM;
arg->num_of_info_elmts = cpu_to_le32(tbl_len);
memcpy(arg->ie_table, tbl, flex_array_size(arg, ie_table, tbl_len));
ret = wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BEACON_FILTER_TABLE,
arg, buf_len);
kfree(arg);
return ret;
}
int wfx_hif_beacon_filter_control(struct wfx_vif *wvif, int enable, int beacon_count)
{
struct wfx_hif_mib_bcn_filter_enable arg = {
.enable = cpu_to_le32(enable),
.bcn_count = cpu_to_le32(beacon_count),
};
return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BEACON_FILTER_ENABLE,
&arg, sizeof(arg));
}
int wfx_hif_set_operational_mode(struct wfx_dev *wdev, enum wfx_hif_op_power_mode mode)
{
struct wfx_hif_mib_gl_operational_power_mode arg = {
.power_mode = mode,
.wup_ind_activation = 1,
};
return wfx_hif_write_mib(wdev, -1, HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE,
&arg, sizeof(arg));
}
int wfx_hif_set_template_frame(struct wfx_vif *wvif, struct sk_buff *skb,
u8 frame_type, int init_rate)
{
struct wfx_hif_mib_template_frame *arg;
WARN(skb->len > HIF_API_MAX_TEMPLATE_FRAME_SIZE, "frame is too big");
        /* Map the 4-byte MIB header onto the skb headroom so the frame payload
         * does not need to be copied into a separate buffer.
         */
        skb_push(skb, 4);
        arg = (struct wfx_hif_mib_template_frame *)skb->data;
        skb_pull(skb, 4);
arg->init_rate = init_rate;
arg->frame_type = frame_type;
arg->frame_length = cpu_to_le16(skb->len);
return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_TEMPLATE_FRAME,
arg, sizeof(*arg) + skb->len);
}
int wfx_hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required)
{
struct wfx_hif_mib_protected_mgmt_policy arg = { };
WARN(required && !capable, "incoherent arguments");
if (capable) {
arg.pmf_enable = 1;
arg.host_enc_auth_frames = 1;
}
if (!required)
arg.unpmf_allowed = 1;
return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_PROTECTED_MGMT_POLICY,
&arg, sizeof(arg));
}
int wfx_hif_set_block_ack_policy(struct wfx_vif *wvif, u8 tx_tid_policy, u8 rx_tid_policy)
{
struct wfx_hif_mib_block_ack_policy arg = {
.block_ack_tx_tid_policy = tx_tid_policy,
.block_ack_rx_tid_policy = rx_tid_policy,
};
return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BLOCK_ACK_POLICY,
&arg, sizeof(arg));
}
int wfx_hif_set_association_mode(struct wfx_vif *wvif, int ampdu_density,
bool greenfield, bool short_preamble)
{
struct wfx_hif_mib_set_association_mode arg = {
.preambtype_use = 1,
.mode = 1,
.spacing = 1,
.short_preamble = short_preamble,
.greenfield = greenfield,
.mpdu_start_spacing = ampdu_density,
};
return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_ASSOCIATION_MODE,
&arg, sizeof(arg));
}
int wfx_hif_set_tx_rate_retry_policy(struct wfx_vif *wvif, int policy_index, u8 *rates)
{
struct wfx_hif_mib_set_tx_rate_retry_policy *arg;
size_t size = struct_size(arg, tx_rate_retry_policy, 1);
int ret;
arg = kzalloc(size, GFP_KERNEL);
if (!arg)
return -ENOMEM;
arg->num_tx_rate_policies = 1;
arg->tx_rate_retry_policy[0].policy_index = policy_index;
arg->tx_rate_retry_policy[0].short_retry_count = 255;
arg->tx_rate_retry_policy[0].long_retry_count = 255;
arg->tx_rate_retry_policy[0].first_rate_sel = 1;
arg->tx_rate_retry_policy[0].terminate = 1;
arg->tx_rate_retry_policy[0].count_init = 1;
memcpy(&arg->tx_rate_retry_policy[0].rates, rates,
sizeof(arg->tx_rate_retry_policy[0].rates));
ret = wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY,
arg, size);
kfree(arg);
return ret;
}
int wfx_hif_keep_alive_period(struct wfx_vif *wvif, int period)
{
struct wfx_hif_mib_keep_alive_period arg = {
.keep_alive_period = cpu_to_le16(period),
};
return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_KEEP_ALIVE_PERIOD,
&arg, sizeof(arg));
}
int wfx_hif_set_arp_ipv4_filter(struct wfx_vif *wvif, int idx, __be32 *addr)
{
struct wfx_hif_mib_arp_ip_addr_table arg = {
.condition_idx = idx,
.arp_enable = HIF_ARP_NS_FILTERING_DISABLE,
};
if (addr) {
/* Caution: type of addr is __be32 */
memcpy(arg.ipv4_address, addr, sizeof(arg.ipv4_address));
arg.arp_enable = HIF_ARP_NS_FILTERING_ENABLE;
}
return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE,
&arg, sizeof(arg));
}
int wfx_hif_use_multi_tx_conf(struct wfx_dev *wdev, bool enable)
{
struct wfx_hif_mib_gl_set_multi_msg arg = {
.enable_multi_tx_conf = enable,
};
return wfx_hif_write_mib(wdev, -1, HIF_MIB_ID_GL_SET_MULTI_MSG, &arg, sizeof(arg));
}
int wfx_hif_set_uapsd_info(struct wfx_vif *wvif, unsigned long val)
{
struct wfx_hif_mib_set_uapsd_information arg = { };
if (val & BIT(IEEE80211_AC_VO))
arg.trig_voice = 1;
if (val & BIT(IEEE80211_AC_VI))
arg.trig_video = 1;
if (val & BIT(IEEE80211_AC_BE))
arg.trig_be = 1;
if (val & BIT(IEEE80211_AC_BK))
arg.trig_bckgrnd = 1;
return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_UAPSD_INFORMATION,
&arg, sizeof(arg));
}
int wfx_hif_erp_use_protection(struct wfx_vif *wvif, bool enable)
{
struct wfx_hif_mib_non_erp_protection arg = {
.use_cts_to_self = enable,
};
return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_NON_ERP_PROTECTION,
&arg, sizeof(arg));
}
int wfx_hif_slot_time(struct wfx_vif *wvif, int val)
{
struct wfx_hif_mib_slot_time arg = {
.slot_time = cpu_to_le32(val),
};
return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SLOT_TIME, &arg, sizeof(arg));
}
int wfx_hif_wep_default_key_id(struct wfx_vif *wvif, int val)
{
struct wfx_hif_mib_wep_default_key_id arg = {
.wep_default_key_id = val,
};
return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID,
&arg, sizeof(arg));
}
int wfx_hif_rts_threshold(struct wfx_vif *wvif, int val)
{
struct wfx_hif_mib_dot11_rts_threshold arg = {
.threshold = cpu_to_le32(val >= 0 ? val : 0xFFFF),
};
return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_RTS_THRESHOLD,
&arg, sizeof(arg));
}
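/* Note on the encoding above: a negative val maps to a threshold of 0xFFFF,
 * which is larger than any frame length and therefore effectively disables
 * RTS/CTS.
 */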
|
linux-master
|
drivers/net/wireless/silabs/wfx/hif_tx_mib.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Low-level I/O functions.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/align.h>
#include "hwio.h"
#include "wfx.h"
#include "bus.h"
#include "traces.h"
#define WFX_HIF_BUFFER_SIZE 0x2000
static int wfx_read32(struct wfx_dev *wdev, int reg, u32 *val)
{
int ret;
        /* Heap bounce buffer: the underlying bus may use DMA, and stack
         * buffers are not guaranteed to be DMA-safe.
         */
        __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
*val = ~0; /* Never return undefined value */
if (!tmp)
return -ENOMEM;
ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, reg, tmp, sizeof(u32));
if (ret >= 0)
*val = le32_to_cpu(*tmp);
kfree(tmp);
if (ret)
dev_err(wdev->dev, "%s: bus communication error: %d\n", __func__, ret);
return ret;
}
static int wfx_write32(struct wfx_dev *wdev, int reg, u32 val)
{
int ret;
__le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
*tmp = cpu_to_le32(val);
ret = wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv, reg, tmp, sizeof(u32));
kfree(tmp);
if (ret)
dev_err(wdev->dev, "%s: bus communication error: %d\n", __func__, ret);
return ret;
}
static int wfx_read32_locked(struct wfx_dev *wdev, int reg, u32 *val)
{
int ret;
wdev->hwbus_ops->lock(wdev->hwbus_priv);
ret = wfx_read32(wdev, reg, val);
_trace_io_read32(reg, *val);
wdev->hwbus_ops->unlock(wdev->hwbus_priv);
return ret;
}
static int wfx_write32_locked(struct wfx_dev *wdev, int reg, u32 val)
{
int ret;
wdev->hwbus_ops->lock(wdev->hwbus_priv);
ret = wfx_write32(wdev, reg, val);
_trace_io_write32(reg, val);
wdev->hwbus_ops->unlock(wdev->hwbus_priv);
return ret;
}
static int wfx_write32_bits_locked(struct wfx_dev *wdev, int reg, u32 mask, u32 val)
{
int ret;
u32 val_r, val_w;
WARN_ON(~mask & val);
val &= mask;
wdev->hwbus_ops->lock(wdev->hwbus_priv);
ret = wfx_read32(wdev, reg, &val_r);
_trace_io_read32(reg, val_r);
if (ret < 0)
goto err;
val_w = (val_r & ~mask) | val;
if (val_w != val_r) {
ret = wfx_write32(wdev, reg, val_w);
_trace_io_write32(reg, val_w);
}
err:
wdev->hwbus_ops->unlock(wdev->hwbus_priv);
return ret;
}
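/* Usage sketch for the read-modify-write helper above: update only the bits
 * in the mask, leave everything else untouched, and skip the bus write when
 * nothing would change. The mask value is illustrative.
 *
 *   wfx_write32_bits_locked(wdev, WFX_REG_CONFIG, BIT(1), BIT(1));
 */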
static int wfx_indirect_read(struct wfx_dev *wdev, int reg, u32 addr, void *buf, size_t len)
{
int ret;
int i;
u32 cfg;
u32 prefetch;
WARN_ON(len >= WFX_HIF_BUFFER_SIZE);
WARN_ON(reg != WFX_REG_AHB_DPORT && reg != WFX_REG_SRAM_DPORT);
if (reg == WFX_REG_AHB_DPORT)
prefetch = CFG_PREFETCH_AHB;
else if (reg == WFX_REG_SRAM_DPORT)
prefetch = CFG_PREFETCH_SRAM;
else
return -ENODEV;
ret = wfx_write32(wdev, WFX_REG_BASE_ADDR, addr);
if (ret < 0)
goto err;
ret = wfx_read32(wdev, WFX_REG_CONFIG, &cfg);
if (ret < 0)
goto err;
ret = wfx_write32(wdev, WFX_REG_CONFIG, cfg | prefetch);
if (ret < 0)
goto err;
        /* Poll until the chip clears the prefetch bit, i.e. the data is ready */
        for (i = 0; i < 20; i++) {
ret = wfx_read32(wdev, WFX_REG_CONFIG, &cfg);
if (ret < 0)
goto err;
if (!(cfg & prefetch))
break;
usleep_range(200, 250);
}
if (i == 20) {
ret = -ETIMEDOUT;
goto err;
}
ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, reg, buf, len);
err:
if (ret < 0)
memset(buf, 0xFF, len); /* Never return undefined value */
return ret;
}
static int wfx_indirect_write(struct wfx_dev *wdev, int reg, u32 addr,
const void *buf, size_t len)
{
int ret;
WARN_ON(len >= WFX_HIF_BUFFER_SIZE);
WARN_ON(reg != WFX_REG_AHB_DPORT && reg != WFX_REG_SRAM_DPORT);
ret = wfx_write32(wdev, WFX_REG_BASE_ADDR, addr);
if (ret < 0)
return ret;
return wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv, reg, buf, len);
}
static int wfx_indirect_read_locked(struct wfx_dev *wdev, int reg, u32 addr,
void *buf, size_t len)
{
int ret;
wdev->hwbus_ops->lock(wdev->hwbus_priv);
ret = wfx_indirect_read(wdev, reg, addr, buf, len);
_trace_io_ind_read(reg, addr, buf, len);
wdev->hwbus_ops->unlock(wdev->hwbus_priv);
return ret;
}
static int wfx_indirect_write_locked(struct wfx_dev *wdev, int reg, u32 addr,
const void *buf, size_t len)
{
int ret;
wdev->hwbus_ops->lock(wdev->hwbus_priv);
ret = wfx_indirect_write(wdev, reg, addr, buf, len);
_trace_io_ind_write(reg, addr, buf, len);
wdev->hwbus_ops->unlock(wdev->hwbus_priv);
return ret;
}
static int wfx_indirect_read32_locked(struct wfx_dev *wdev, int reg, u32 addr, u32 *val)
{
int ret;
__le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
wdev->hwbus_ops->lock(wdev->hwbus_priv);
ret = wfx_indirect_read(wdev, reg, addr, tmp, sizeof(u32));
*val = le32_to_cpu(*tmp);
_trace_io_ind_read32(reg, addr, *val);
wdev->hwbus_ops->unlock(wdev->hwbus_priv);
kfree(tmp);
return ret;
}
static int wfx_indirect_write32_locked(struct wfx_dev *wdev, int reg, u32 addr, u32 val)
{
int ret;
__le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
*tmp = cpu_to_le32(val);
wdev->hwbus_ops->lock(wdev->hwbus_priv);
ret = wfx_indirect_write(wdev, reg, addr, tmp, sizeof(u32));
_trace_io_ind_write32(reg, addr, val);
wdev->hwbus_ops->unlock(wdev->hwbus_priv);
kfree(tmp);
return ret;
}
int wfx_data_read(struct wfx_dev *wdev, void *buf, size_t len)
{
int ret;
WARN(!IS_ALIGNED((uintptr_t)buf, 4), "unaligned buffer");
wdev->hwbus_ops->lock(wdev->hwbus_priv);
ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, WFX_REG_IN_OUT_QUEUE, buf, len);
_trace_io_read(WFX_REG_IN_OUT_QUEUE, buf, len);
wdev->hwbus_ops->unlock(wdev->hwbus_priv);
if (ret)
dev_err(wdev->dev, "%s: bus communication error: %d\n", __func__, ret);
return ret;
}
int wfx_data_write(struct wfx_dev *wdev, const void *buf, size_t len)
{
int ret;
WARN(!IS_ALIGNED((uintptr_t)buf, 4), "unaligned buffer");
wdev->hwbus_ops->lock(wdev->hwbus_priv);
ret = wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv, WFX_REG_IN_OUT_QUEUE, buf, len);
_trace_io_write(WFX_REG_IN_OUT_QUEUE, buf, len);
wdev->hwbus_ops->unlock(wdev->hwbus_priv);
if (ret)
dev_err(wdev->dev, "%s: bus communication error: %d\n", __func__, ret);
return ret;
}
int wfx_sram_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len)
{
return wfx_indirect_read_locked(wdev, WFX_REG_SRAM_DPORT, addr, buf, len);
}
int wfx_ahb_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len)
{
return wfx_indirect_read_locked(wdev, WFX_REG_AHB_DPORT, addr, buf, len);
}
int wfx_sram_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len)
{
return wfx_indirect_write_locked(wdev, WFX_REG_SRAM_DPORT, addr, buf, len);
}
int wfx_ahb_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len)
{
return wfx_indirect_write_locked(wdev, WFX_REG_AHB_DPORT, addr, buf, len);
}
int wfx_sram_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val)
{
return wfx_indirect_read32_locked(wdev, WFX_REG_SRAM_DPORT, addr, val);
}
int wfx_ahb_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val)
{
return wfx_indirect_read32_locked(wdev, WFX_REG_AHB_DPORT, addr, val);
}
int wfx_sram_reg_write(struct wfx_dev *wdev, u32 addr, u32 val)
{
return wfx_indirect_write32_locked(wdev, WFX_REG_SRAM_DPORT, addr, val);
}
int wfx_ahb_reg_write(struct wfx_dev *wdev, u32 addr, u32 val)
{
return wfx_indirect_write32_locked(wdev, WFX_REG_AHB_DPORT, addr, val);
}
int wfx_config_reg_read(struct wfx_dev *wdev, u32 *val)
{
return wfx_read32_locked(wdev, WFX_REG_CONFIG, val);
}
int wfx_config_reg_write(struct wfx_dev *wdev, u32 val)
{
return wfx_write32_locked(wdev, WFX_REG_CONFIG, val);
}
int wfx_config_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val)
{
return wfx_write32_bits_locked(wdev, WFX_REG_CONFIG, mask, val);
}
int wfx_control_reg_read(struct wfx_dev *wdev, u32 *val)
{
return wfx_read32_locked(wdev, WFX_REG_CONTROL, val);
}
int wfx_control_reg_write(struct wfx_dev *wdev, u32 val)
{
return wfx_write32_locked(wdev, WFX_REG_CONTROL, val);
}
int wfx_control_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val)
{
return wfx_write32_bits_locked(wdev, WFX_REG_CONTROL, mask, val);
}
int wfx_igpr_reg_read(struct wfx_dev *wdev, int index, u32 *val)
{
int ret;
*val = ~0; /* Never return undefined value */
ret = wfx_write32_locked(wdev, WFX_REG_SET_GEN_R_W, IGPR_RW | index << 24);
if (ret)
return ret;
ret = wfx_read32_locked(wdev, WFX_REG_SET_GEN_R_W, val);
if (ret)
return ret;
*val &= IGPR_VALUE;
return ret;
}
int wfx_igpr_reg_write(struct wfx_dev *wdev, int index, u32 val)
{
return wfx_write32_locked(wdev, WFX_REG_SET_GEN_R_W, index << 24 | val);
}
|
linux-master
|
drivers/net/wireless/silabs/wfx/hwio.c
|
/*
* Copyright (c) 2014 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
|
linux-master
|
drivers/net/wireless/ath/trace.c
|
/*
* Copyright (c) 2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/export.h>
#include <asm/unaligned.h>
#include "ath.h"
#include "reg.h"
#define REG_READ (common->ops->read)
#define REG_WRITE(_ah, _reg, _val) (common->ops->write)(_ah, _val, _reg)
/**
 * ath_hw_setbssidmask - set the BSSID mask for the BSSIDs we listen to
*
* @common: the ath_common struct for the device.
*
* BSSID masking is a method used by AR5212 and newer hardware to inform PCU
* which bits of the interface's MAC address should be looked at when trying
* to decide which packets to ACK. In station mode and AP mode with a single
* BSS every bit matters since we lock to only one BSS. In AP mode with
* multiple BSSes (virtual interfaces) not every bit matters because hw must
* accept frames for all BSSes and so we tweak some bits of our mac address
* in order to have multiple BSSes.
*
* NOTE: This is a simple filter and does *not* filter out all
* relevant frames. Some frames that are not for us might get ACKed from us
* by PCU because they just match the mask.
*
* When handling multiple BSSes you can get the BSSID mask by computing the
* set of ~ ( MAC XOR BSSID ) for all bssids we handle.
*
* When you do this you are essentially computing the common bits of all your
* BSSes. Later it is assumed the hardware will "and" (&) the BSSID mask with
* the MAC address to obtain the relevant bits and compare the result with
* (frame's BSSID & mask) to see if they match.
*
* Simple example: on your card you have two BSSes you have created with
 * BSSID-01 and BSSID-02. Let's assume BSSID-01 will not use the MAC address.
* There is another BSSID-03 but you are not part of it. For simplicity's sake,
* assuming only 4 bits for a mac address and for BSSIDs you can then have:
*
* \
* MAC: 0001 |
* BSSID-01: 0100 | --> Belongs to us
* BSSID-02: 1001 |
* /
* -------------------
* BSSID-03: 0110 | --> External
* -------------------
*
* Our bssid_mask would then be:
*
* On loop iteration for BSSID-01:
* ~(0001 ^ 0100) -> ~(0101)
* -> 1010
* bssid_mask = 1010
*
* On loop iteration for BSSID-02:
* bssid_mask &= ~(0001 ^ 1001)
* bssid_mask = (1010) & ~(0001 ^ 1001)
* bssid_mask = (1010) & ~(1000)
* bssid_mask = (1010) & (0111)
* bssid_mask = 0010
*
 * A bssid_mask of 0010 means "only pay attention to the second least
 * significant bit". This is because it is the only bit common
 * amongst the MAC and all BSSIDs we support. To find out what the real
 * common bit is, we can simply "&" the bssid_mask with any BSSID we have
 * or with our MAC address (we assume the hardware uses the MAC address).
*
* Now, suppose there's an incoming frame for BSSID-03:
*
* IFRAME-01: 0110
*
 * An easy eye-inspection of this should already tell you that this frame
 * will not pass our check. This is because the bssid_mask tells the
* hardware to only look at the second least significant bit and the
* common bit amongst the MAC and BSSIDs is 0, this frame has the 2nd LSB
* as 1, which does not match 0.
*
* So with IFRAME-01 we *assume* the hardware will do:
*
* allow = (IFRAME-01 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
* --> allow = (0110 & 0010) == (0010 & 0001) ? 1 : 0;
* --> allow = (0010) == 0000 ? 1 : 0;
* --> allow = 0
*
 * Let's now test a frame that should work:
*
* IFRAME-02: 0001 (we should allow)
*
* allow = (IFRAME-02 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
 * --> allow = (0001 & 0010) == (0010 & 0001) ? 1 : 0;
* --> allow = (0000) == (0000)
* --> allow = 1
*
* Other examples:
*
* IFRAME-03: 0100 --> allowed
* IFRAME-04: 1001 --> allowed
 * IFRAME-05: 1101 --> allowed but it's not for us!!!
*
*/
void ath_hw_setbssidmask(struct ath_common *common)
{
void *ah = common->ah;
u32 id1;
REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
id1 = REG_READ(ah, AR_STA_ID1) & ~AR_STA_ID1_SADH_MASK;
id1 |= get_unaligned_le16(common->macaddr + 4);
REG_WRITE(ah, AR_STA_ID1, id1);
REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(common->bssidmask));
REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(common->bssidmask + 4));
}
EXPORT_SYMBOL(ath_hw_setbssidmask);
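/* A minimal sketch (not part of this file) of the mask computation described
 * in the comment above: start from all-ones and AND in ~(MAC XOR BSSID) for
 * every BSSID we handle, byte by byte. The function name is hypothetical.
 */
static inline void example_compute_bssidmask(struct ath_common *common,
                                             const u8 bssids[][ETH_ALEN],
                                             int n)
{
        int i, b;

        memset(common->bssidmask, 0xff, ETH_ALEN);
        for (i = 0; i < n; i++)
                for (b = 0; b < ETH_ALEN; b++)
                        common->bssidmask[b] &=
                                ~(common->macaddr[b] ^ bssids[i][b]);
}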
/**
* ath_hw_cycle_counters_update - common function to update cycle counters
*
* @common: the ath_common struct for the device.
*
* This function is used to update all cycle counters in one place.
* It has to be called while holding common->cc_lock!
*/
void ath_hw_cycle_counters_update(struct ath_common *common)
{
u32 cycles, busy, rx, tx;
void *ah = common->ah;
/* freeze */
REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC);
/* read */
cycles = REG_READ(ah, AR_CCCNT);
busy = REG_READ(ah, AR_RCCNT);
rx = REG_READ(ah, AR_RFCNT);
tx = REG_READ(ah, AR_TFCNT);
/* clear */
REG_WRITE(ah, AR_CCCNT, 0);
REG_WRITE(ah, AR_RFCNT, 0);
REG_WRITE(ah, AR_RCCNT, 0);
REG_WRITE(ah, AR_TFCNT, 0);
/* unfreeze */
REG_WRITE(ah, AR_MIBC, 0);
/* update all cycle counters here */
common->cc_ani.cycles += cycles;
common->cc_ani.rx_busy += busy;
common->cc_ani.rx_frame += rx;
common->cc_ani.tx_frame += tx;
common->cc_survey.cycles += cycles;
common->cc_survey.rx_busy += busy;
common->cc_survey.rx_frame += rx;
common->cc_survey.tx_frame += tx;
}
EXPORT_SYMBOL(ath_hw_cycle_counters_update);
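/* Usage sketch for the locking requirement above (the exact lock flavour
 * depends on the caller's context):
 *
 *   spin_lock_bh(&common->cc_lock);
 *   ath_hw_cycle_counters_update(common);
 *   spin_unlock_bh(&common->cc_lock);
 */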
int32_t ath_hw_get_listen_time(struct ath_common *common)
{
struct ath_cycle_counters *cc = &common->cc_ani;
int32_t listen_time;
listen_time = (cc->cycles - cc->rx_frame - cc->tx_frame) /
(common->clockrate * 1000);
memset(cc, 0, sizeof(*cc));
return listen_time;
}
EXPORT_SYMBOL(ath_hw_get_listen_time);
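/* Worked example for the formula above: clockrate is in MHz, so
 * common->clockrate * 1000 is the number of cycles per millisecond. With
 * clockrate = 44, a delta of 4,400,000 cycles and no rx/tx frames gives
 * 4,400,000 / 44,000 = 100 ms of listen time.
 */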
|
linux-master
|
drivers/net/wireless/ath/hw.c
|
/*
* Copyright (c) 2012 Neratec Solutions AG
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "ath.h"
#include "dfs_pattern_detector.h"
#include "dfs_pri_detector.h"
struct ath_dfs_pool_stats global_dfs_pool_stats = {};
#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
#define GET_PRI_TO_USE(MIN, MAX, RUNTIME) \
(MIN + PRI_TOLERANCE == MAX - PRI_TOLERANCE ? \
MIN + PRI_TOLERANCE : RUNTIME)
/*
* struct pulse_elem - elements in pulse queue
*/
struct pulse_elem {
struct list_head head;
u64 ts;
};
/*
* pde_get_multiple() - get number of multiples considering a given tolerance
* Return value: factor if abs(val - factor*fraction) <= tolerance, 0 otherwise
*/
static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
{
u32 remainder;
u32 factor;
u32 delta;
if (fraction == 0)
return 0;
delta = (val < fraction) ? (fraction - val) : (val - fraction);
if (delta <= tolerance)
/* val and fraction are within tolerance */
return 1;
factor = val / fraction;
remainder = val % fraction;
if (remainder > tolerance) {
/* no exact match */
if ((fraction - remainder) <= tolerance)
/* remainder is within tolerance */
factor++;
else
factor = 0;
}
return factor;
}
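/* Worked examples: pde_get_multiple(3004, 1000, 6) returns 3 because
 * 3004 = 3 * 1000 + 4 and the remainder 4 is within the tolerance of 6.
 * pde_get_multiple(3500, 1000, 6) returns 0: the remainder 500 is outside
 * the tolerance in both directions, so no multiple matches.
 */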
/*
* DOC: Singleton Pulse and Sequence Pools
*
* Instances of pri_sequence and pulse_elem are kept in singleton pools to
* reduce the number of dynamic allocations. They are shared between all
* instances and grow up to the peak number of simultaneously used objects.
*
* Memory is freed after all references to the pools are released.
*/
static u32 singleton_pool_references;
static LIST_HEAD(pulse_pool);
static LIST_HEAD(pseq_pool);
static DEFINE_SPINLOCK(pool_lock);
static void pool_register_ref(void)
{
spin_lock_bh(&pool_lock);
singleton_pool_references++;
DFS_POOL_STAT_INC(pool_reference);
spin_unlock_bh(&pool_lock);
}
static void pool_deregister_ref(void)
{
spin_lock_bh(&pool_lock);
singleton_pool_references--;
DFS_POOL_STAT_DEC(pool_reference);
if (singleton_pool_references == 0) {
/* free singleton pools with no references left */
struct pri_sequence *ps, *ps0;
struct pulse_elem *p, *p0;
list_for_each_entry_safe(p, p0, &pulse_pool, head) {
list_del(&p->head);
DFS_POOL_STAT_DEC(pulse_allocated);
kfree(p);
}
list_for_each_entry_safe(ps, ps0, &pseq_pool, head) {
list_del(&ps->head);
DFS_POOL_STAT_DEC(pseq_allocated);
kfree(ps);
}
}
spin_unlock_bh(&pool_lock);
}
static void pool_put_pulse_elem(struct pulse_elem *pe)
{
spin_lock_bh(&pool_lock);
list_add(&pe->head, &pulse_pool);
DFS_POOL_STAT_DEC(pulse_used);
spin_unlock_bh(&pool_lock);
}
static void pool_put_pseq_elem(struct pri_sequence *pse)
{
spin_lock_bh(&pool_lock);
list_add(&pse->head, &pseq_pool);
DFS_POOL_STAT_DEC(pseq_used);
spin_unlock_bh(&pool_lock);
}
static struct pri_sequence *pool_get_pseq_elem(void)
{
struct pri_sequence *pse = NULL;
spin_lock_bh(&pool_lock);
if (!list_empty(&pseq_pool)) {
pse = list_first_entry(&pseq_pool, struct pri_sequence, head);
list_del(&pse->head);
DFS_POOL_STAT_INC(pseq_used);
}
spin_unlock_bh(&pool_lock);
return pse;
}
static struct pulse_elem *pool_get_pulse_elem(void)
{
struct pulse_elem *pe = NULL;
spin_lock_bh(&pool_lock);
if (!list_empty(&pulse_pool)) {
pe = list_first_entry(&pulse_pool, struct pulse_elem, head);
list_del(&pe->head);
DFS_POOL_STAT_INC(pulse_used);
}
spin_unlock_bh(&pool_lock);
return pe;
}
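/* Typical element lifecycle: callers first try pool_get_pulse_elem() /
 * pool_get_pseq_elem() and fall back to kmalloc() only on a pool miss (see
 * pulse_queue_enqueue() and pseq_handler_create_sequences() below); used
 * elements go back to the pools via the pool_put_*() helpers and are only
 * truly freed once the last reference drops in pool_deregister_ref().
 */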
static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde)
{
struct list_head *l = &pde->pulses;
if (list_empty(l))
return NULL;
return list_entry(l->prev, struct pulse_elem, head);
}
static bool pulse_queue_dequeue(struct pri_detector *pde)
{
struct pulse_elem *p = pulse_queue_get_tail(pde);
if (p != NULL) {
list_del_init(&p->head);
pde->count--;
/* give it back to pool */
pool_put_pulse_elem(p);
}
return (pde->count > 0);
}
/* remove pulses older than window */
static void pulse_queue_check_window(struct pri_detector *pde)
{
u64 min_valid_ts;
struct pulse_elem *p;
/* there is no delta time with less than 2 pulses */
if (pde->count < 2)
return;
if (pde->last_ts <= pde->window_size)
return;
min_valid_ts = pde->last_ts - pde->window_size;
while ((p = pulse_queue_get_tail(pde)) != NULL) {
if (p->ts >= min_valid_ts)
return;
pulse_queue_dequeue(pde);
}
}
static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
{
struct pulse_elem *p = pool_get_pulse_elem();
if (p == NULL) {
p = kmalloc(sizeof(*p), GFP_ATOMIC);
if (p == NULL) {
DFS_POOL_STAT_INC(pulse_alloc_error);
return false;
}
DFS_POOL_STAT_INC(pulse_allocated);
DFS_POOL_STAT_INC(pulse_used);
}
INIT_LIST_HEAD(&p->head);
p->ts = ts;
list_add(&p->head, &pde->pulses);
pde->count++;
pde->last_ts = ts;
pulse_queue_check_window(pde);
if (pde->count >= pde->max_count)
pulse_queue_dequeue(pde);
return true;
}
static bool pseq_handler_create_sequences(struct pri_detector *pde,
u64 ts, u32 min_count)
{
struct pulse_elem *p;
list_for_each_entry(p, &pde->pulses, head) {
struct pri_sequence ps, *new_ps;
struct pulse_elem *p2;
u32 tmp_false_count;
u64 min_valid_ts;
u32 delta_ts = ts - p->ts;
if (delta_ts < pde->rs->pri_min)
/* ignore too small pri */
continue;
if (delta_ts > pde->rs->pri_max)
/* stop on too large pri (sorted list) */
break;
/* build a new sequence with new potential pri */
ps.count = 2;
ps.count_falses = 0;
ps.first_ts = p->ts;
ps.last_ts = ts;
ps.pri = GET_PRI_TO_USE(pde->rs->pri_min,
pde->rs->pri_max, ts - p->ts);
ps.dur = ps.pri * (pde->rs->ppb - 1)
+ 2 * pde->rs->max_pri_tolerance;
p2 = p;
tmp_false_count = 0;
min_valid_ts = ts - ps.dur;
/* check which past pulses are candidates for new sequence */
list_for_each_entry_continue(p2, &pde->pulses, head) {
u32 factor;
if (p2->ts < min_valid_ts)
/* stop on crossing window border */
break;
/* check if pulse match (multi)PRI */
factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri,
pde->rs->max_pri_tolerance);
if (factor > 0) {
ps.count++;
ps.first_ts = p2->ts;
/*
* on match, add the intermediate falses
* and reset counter
*/
ps.count_falses += tmp_false_count;
tmp_false_count = 0;
} else {
/* this is a potential false one */
tmp_false_count++;
}
}
if (ps.count <= min_count)
/* did not reach minimum count, drop sequence */
continue;
/* this is a valid one, add it */
ps.deadline_ts = ps.first_ts + ps.dur;
new_ps = pool_get_pseq_elem();
if (new_ps == NULL) {
new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);
if (new_ps == NULL) {
DFS_POOL_STAT_INC(pseq_alloc_error);
return false;
}
DFS_POOL_STAT_INC(pseq_allocated);
DFS_POOL_STAT_INC(pseq_used);
}
memcpy(new_ps, &ps, sizeof(ps));
INIT_LIST_HEAD(&new_ps->head);
list_add(&new_ps->head, &pde->sequences);
}
return true;
}
/* check new ts and add to all matching existing sequences */
static u32
pseq_handler_add_to_existing_seqs(struct pri_detector *pde, u64 ts)
{
u32 max_count = 0;
struct pri_sequence *ps, *ps2;
list_for_each_entry_safe(ps, ps2, &pde->sequences, head) {
u32 delta_ts;
u32 factor;
/* first ensure that sequence is within window */
if (ts > ps->deadline_ts) {
list_del_init(&ps->head);
pool_put_pseq_elem(ps);
continue;
}
delta_ts = ts - ps->last_ts;
factor = pde_get_multiple(delta_ts, ps->pri,
pde->rs->max_pri_tolerance);
if (factor > 0) {
ps->last_ts = ts;
ps->count++;
if (max_count < ps->count)
max_count = ps->count;
} else {
ps->count_falses++;
}
}
return max_count;
}
static struct pri_sequence *
pseq_handler_check_detection(struct pri_detector *pde)
{
struct pri_sequence *ps;
if (list_empty(&pde->sequences))
return NULL;
list_for_each_entry(ps, &pde->sequences, head) {
/*
* we assume to have enough matching confidence if we
* 1) have enough pulses
* 2) have more matching than false pulses
*/
if ((ps->count >= pde->rs->ppb_thresh) &&
(ps->count * pde->rs->num_pri >= ps->count_falses))
return ps;
}
return NULL;
}
/* free pulse queue and sequences list and give objects back to pools */
static void pri_detector_reset(struct pri_detector *pde, u64 ts)
{
struct pri_sequence *ps, *ps0;
struct pulse_elem *p, *p0;
list_for_each_entry_safe(ps, ps0, &pde->sequences, head) {
list_del_init(&ps->head);
pool_put_pseq_elem(ps);
}
list_for_each_entry_safe(p, p0, &pde->pulses, head) {
list_del_init(&p->head);
pool_put_pulse_elem(p);
}
pde->count = 0;
pde->last_ts = ts;
}
static void pri_detector_exit(struct pri_detector *de)
{
pri_detector_reset(de, 0);
pool_deregister_ref();
kfree(de);
}
static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
struct pulse_event *event)
{
u32 max_updated_seq;
struct pri_sequence *ps;
u64 ts = event->ts;
const struct radar_detector_specs *rs = de->rs;
/* ignore pulses not within width range */
if ((rs->width_min > event->width) || (rs->width_max < event->width))
return NULL;
if ((ts - de->last_ts) < rs->max_pri_tolerance)
/* if delta to last pulse is too short, don't use this pulse */
return NULL;
/* radar detector spec needs chirp, but not detected */
if (rs->chirp && rs->chirp != event->chirp)
return NULL;
de->last_ts = ts;
max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);
if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
pri_detector_reset(de, ts);
return NULL;
}
ps = pseq_handler_check_detection(de);
if (ps == NULL)
pulse_queue_enqueue(de, ts);
return ps;
}
struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs)
{
struct pri_detector *de;
de = kzalloc(sizeof(*de), GFP_ATOMIC);
if (de == NULL)
return NULL;
de->exit = pri_detector_exit;
de->add_pulse = pri_detector_add_pulse;
de->reset = pri_detector_reset;
INIT_LIST_HEAD(&de->sequences);
INIT_LIST_HEAD(&de->pulses);
de->window_size = rs->pri_max * rs->ppb * rs->num_pri;
de->max_count = rs->ppb * 2;
de->rs = rs;
pool_register_ref();
return de;
}
|
linux-master
|
drivers/net/wireless/ath/dfs_pri_detector.c
|
/*
* Copyright (c) 2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/export.h>
#include "ath.h"
const char *ath_opmode_to_string(enum nl80211_iftype opmode)
{
switch (opmode) {
case NL80211_IFTYPE_UNSPECIFIED:
return "UNSPEC";
case NL80211_IFTYPE_ADHOC:
return "ADHOC";
case NL80211_IFTYPE_STATION:
return "STATION";
case NL80211_IFTYPE_AP:
return "AP";
case NL80211_IFTYPE_AP_VLAN:
return "AP-VLAN";
case NL80211_IFTYPE_WDS:
return "WDS";
case NL80211_IFTYPE_MONITOR:
return "MONITOR";
case NL80211_IFTYPE_MESH_POINT:
return "MESH";
case NL80211_IFTYPE_P2P_CLIENT:
return "P2P-CLIENT";
case NL80211_IFTYPE_P2P_GO:
return "P2P-GO";
case NL80211_IFTYPE_OCB:
return "OCB";
default:
return "UNKNOWN";
}
}
EXPORT_SYMBOL(ath_opmode_to_string);
|
linux-master
|
drivers/net/wireless/ath/debug.c
|
/*
* Copyright (c) 2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include "ath.h"
#include "trace.h"
MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Shared library for Atheros wireless LAN cards.");
MODULE_LICENSE("Dual BSD/GPL");
struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
u32 len,
gfp_t gfp_mask)
{
struct sk_buff *skb;
u32 off;
/*
* Cache-line-align. This is important (for the
* 5210 at least) as not doing so causes bogus data
* in rx'd frames.
*/
        /* Note: the kernel may allocate more than what we ask it to
         * give us. We really only need 4 KB, as that is all this
         * hardware supports; in fact we need at least 3849 bytes, as
         * that is the maximum A-MSDU size this hardware supports.
         * Unfortunately this means we may get 8 KB here from the
         * kernel... and that is actually what is observed on some
         * systems :( */
skb = __dev_alloc_skb(len + common->cachelsz - 1, gfp_mask);
if (skb != NULL) {
off = ((unsigned long) skb->data) % common->cachelsz;
if (off != 0)
skb_reserve(skb, common->cachelsz - off);
} else {
pr_err("skbuff alloc of size %u failed\n", len);
return NULL;
}
return skb;
}
EXPORT_SYMBOL(ath_rxbuf_alloc);
bool ath_is_mybeacon(struct ath_common *common, struct ieee80211_hdr *hdr)
{
return ieee80211_is_beacon(hdr->frame_control) &&
!is_zero_ether_addr(common->curbssid) &&
ether_addr_equal_64bits(hdr->addr3, common->curbssid);
}
EXPORT_SYMBOL(ath_is_mybeacon);
void ath_printk(const char *level, const struct ath_common* common,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
if (common && common->hw && common->hw->wiphy) {
printk("%sath: %s: %pV",
level, wiphy_name(common->hw->wiphy), &vaf);
trace_ath_log(common->hw->wiphy, &vaf);
} else {
printk("%sath: %pV", level, &vaf);
}
va_end(args);
}
EXPORT_SYMBOL(ath_printk);
const char *ath_bus_type_strings[] = {
[ATH_PCI] = "pci",
[ATH_AHB] = "ahb",
[ATH_USB] = "usb",
};
EXPORT_SYMBOL(ath_bus_type_strings);
|
linux-master
|
drivers/net/wireless/ath/main.c
|
/*
* Copyright (c) 2012 Neratec Solutions AG
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/export.h>
#include "dfs_pattern_detector.h"
#include "dfs_pri_detector.h"
#include "ath.h"
/**
* struct radar_types - contains array of patterns defined for one DFS domain
* @region: regulatory DFS region
* @num_radar_types: number of radar types to follow
* @radar_types: radar types array
*/
struct radar_types {
enum nl80211_dfs_regions region;
u32 num_radar_types;
const struct radar_detector_specs *radar_types;
};
/* percentage on ppb threshold to trigger detection */
#define MIN_PPB_THRESH 50
#define PPB_THRESH_RATE(PPB, RATE) ((PPB * RATE + 100 - RATE) / 100)
#define PPB_THRESH(PPB) PPB_THRESH_RATE(PPB, MIN_PPB_THRESH)
#define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF)
/* percentage of pulse width tolerance */
#define WIDTH_TOLERANCE 5
#define WIDTH_LOWER(X) ((X*(100-WIDTH_TOLERANCE)+50)/100)
#define WIDTH_UPPER(X) ((X*(100+WIDTH_TOLERANCE)+50)/100)
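/* Worked examples for the helpers above: PRF2PRI(700) =
 * (1000000 + 350) / 700 = 1429, i.e. a 700 Hz pulse repetition frequency
 * corresponds to a ~1429 us pulse repetition interval, rounded to the
 * nearest microsecond. PPB_THRESH(18) = (18 * 50 + 50) / 100 = 9: at least
 * half of the nominal 18 pulses per burst must match to trigger detection.
 */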
#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, CHIRP) \
{ \
ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
(PRF2PRI(PMAX) - PRI_TOLERANCE), \
(PRF2PRI(PMIN) * PRF + PRI_TOLERANCE), PRF, PPB * PRF, \
PPB_THRESH(PPB), PRI_TOLERANCE, CHIRP \
}
/* radar types as defined by ETSI EN-301-893 v1.5.1 */
static const struct radar_detector_specs etsi_radar_ref_types_v15[] = {
ETSI_PATTERN(0, 0, 1, 700, 700, 1, 18, false),
ETSI_PATTERN(1, 0, 5, 200, 1000, 1, 10, false),
ETSI_PATTERN(2, 0, 15, 200, 1600, 1, 15, false),
ETSI_PATTERN(3, 0, 15, 2300, 4000, 1, 25, false),
ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20, false),
ETSI_PATTERN(5, 0, 2, 300, 400, 3, 10, false),
ETSI_PATTERN(6, 0, 2, 400, 1200, 3, 15, false),
};
static const struct radar_types etsi_radar_types_v15 = {
.region = NL80211_DFS_ETSI,
.num_radar_types = ARRAY_SIZE(etsi_radar_ref_types_v15),
.radar_types = etsi_radar_ref_types_v15,
};
#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, CHIRP) \
{ \
ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
PMIN - PRI_TOLERANCE, \
PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \
PPB_THRESH(PPB), PRI_TOLERANCE, CHIRP \
}
/* Radar types released on August 14, 2014.
 * Type 1 PRI values are randomly selected within the range of 518 and 3066.
 * Dividing them into 3 groups is good enough for both radar detection and
 * avoidance of false detections, based on practical test results
 * collected for more than a year.
 */
static const struct radar_detector_specs fcc_radar_ref_types[] = {
FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18, false),
FCC_PATTERN(101, 0, 1, 518, 938, 1, 57, false),
FCC_PATTERN(102, 0, 1, 938, 2000, 1, 27, false),
FCC_PATTERN(103, 0, 1, 2000, 3066, 1, 18, false),
FCC_PATTERN(2, 0, 5, 150, 230, 1, 23, false),
FCC_PATTERN(3, 6, 10, 200, 500, 1, 16, false),
FCC_PATTERN(4, 11, 20, 200, 500, 1, 12, false),
FCC_PATTERN(5, 50, 100, 1000, 2000, 1, 1, true),
FCC_PATTERN(6, 0, 1, 333, 333, 1, 9, false),
};
static const struct radar_types fcc_radar_types = {
.region = NL80211_DFS_FCC,
.num_radar_types = ARRAY_SIZE(fcc_radar_ref_types),
.radar_types = fcc_radar_ref_types,
};
#define JP_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, RATE, CHIRP) \
{ \
ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
PMIN - PRI_TOLERANCE, \
PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \
PPB_THRESH_RATE(PPB, RATE), PRI_TOLERANCE, CHIRP \
}
static const struct radar_detector_specs jp_radar_ref_types[] = {
JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18, 29, false),
JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18, 29, false),
JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18, 50, false),
JP_PATTERN(3, 0, 4, 4000, 4000, 1, 18, 50, false),
JP_PATTERN(4, 0, 5, 150, 230, 1, 23, 50, false),
JP_PATTERN(5, 6, 10, 200, 500, 1, 16, 50, false),
JP_PATTERN(6, 11, 20, 200, 500, 1, 12, 50, false),
JP_PATTERN(7, 50, 100, 1000, 2000, 1, 3, 50, true),
JP_PATTERN(5, 0, 1, 333, 333, 1, 9, 50, false),
};
static const struct radar_types jp_radar_types = {
.region = NL80211_DFS_JP,
.num_radar_types = ARRAY_SIZE(jp_radar_ref_types),
.radar_types = jp_radar_ref_types,
};
static const struct radar_types *dfs_domains[] = {
&etsi_radar_types_v15,
&fcc_radar_types,
&jp_radar_types,
};
/**
* get_dfs_domain_radar_types() - get radar types for a given DFS domain
* @region: regulatory DFS region
*
* Return value: radar_types ptr on success, NULL if DFS domain is not supported
*/
static const struct radar_types *
get_dfs_domain_radar_types(enum nl80211_dfs_regions region)
{
u32 i;
for (i = 0; i < ARRAY_SIZE(dfs_domains); i++) {
if (dfs_domains[i]->region == region)
return dfs_domains[i];
}
return NULL;
}
/**
* struct channel_detector - detector elements for a DFS channel
* @head: list_head
* @freq: frequency for this channel detector in MHz
* @detectors: array of dynamically created detector elements for this freq
*
* Channel detectors are required to provide multi-channel DFS detection, e.g.
* to support off-channel scanning. A pattern detector has a list of channels
* radar pulses have been reported for in the past.
*/
struct channel_detector {
struct list_head head;
u16 freq;
struct pri_detector **detectors;
};
/* channel_detector_reset() - reset detector lines for a given channel */
static void channel_detector_reset(struct dfs_pattern_detector *dpd,
struct channel_detector *cd)
{
u32 i;
if (cd == NULL)
return;
for (i = 0; i < dpd->num_radar_types; i++)
cd->detectors[i]->reset(cd->detectors[i], dpd->last_pulse_ts);
}
/* channel_detector_exit() - destructor */
static void channel_detector_exit(struct dfs_pattern_detector *dpd,
struct channel_detector *cd)
{
u32 i;
if (cd == NULL)
return;
list_del(&cd->head);
if (cd->detectors) {
for (i = 0; i < dpd->num_radar_types; i++) {
struct pri_detector *de = cd->detectors[i];
if (de != NULL)
de->exit(de);
}
}
kfree(cd->detectors);
kfree(cd);
}
static struct channel_detector *
channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
{
u32 i;
struct channel_detector *cd;
cd = kmalloc(sizeof(*cd), GFP_ATOMIC);
if (cd == NULL)
goto fail;
INIT_LIST_HEAD(&cd->head);
cd->freq = freq;
        /* Zero-init so that a partial failure below leaves NULL entries for
         * channel_detector_exit() to skip safely.
         */
        cd->detectors = kcalloc(dpd->num_radar_types,
                                sizeof(*cd->detectors), GFP_ATOMIC);
if (cd->detectors == NULL)
goto fail;
for (i = 0; i < dpd->num_radar_types; i++) {
const struct radar_detector_specs *rs = &dpd->radar_spec[i];
struct pri_detector *de = pri_detector_init(rs);
if (de == NULL)
goto fail;
cd->detectors[i] = de;
}
list_add(&cd->head, &dpd->channel_detectors);
return cd;
fail:
ath_dbg(dpd->common, DFS,
"failed to allocate channel_detector for freq=%d\n", freq);
channel_detector_exit(dpd, cd);
return NULL;
}
/**
* channel_detector_get() - get channel detector for given frequency
* @dpd: DPD instance pointer
 * @freq: frequency in MHz
*
* Return value: pointer to channel detector on success, NULL otherwise
*
 * Return existing channel detector for the given frequency or return a
 * newly created one.
*/
static struct channel_detector *
channel_detector_get(struct dfs_pattern_detector *dpd, u16 freq)
{
struct channel_detector *cd;
list_for_each_entry(cd, &dpd->channel_detectors, head) {
if (cd->freq == freq)
return cd;
}
return channel_detector_create(dpd, freq);
}
/*
* DFS Pattern Detector
*/
/* dpd_reset(): reset all channel detectors */
static void dpd_reset(struct dfs_pattern_detector *dpd)
{
struct channel_detector *cd;
list_for_each_entry(cd, &dpd->channel_detectors, head)
channel_detector_reset(dpd, cd);
}
static void dpd_exit(struct dfs_pattern_detector *dpd)
{
struct channel_detector *cd, *cd0;
list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head)
channel_detector_exit(dpd, cd);
kfree(dpd);
}
static bool
dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event,
struct radar_detector_specs *rs)
{
u32 i;
struct channel_detector *cd;
/*
* pulses received for a non-supported or un-initialized
* domain are treated as detected radars for fail-safety
*/
if (dpd->region == NL80211_DFS_UNSET)
return true;
cd = channel_detector_get(dpd, event->freq);
if (cd == NULL)
return false;
/* reset detector on time stamp wraparound, caused by TSF reset */
if (event->ts < dpd->last_pulse_ts)
dpd_reset(dpd);
dpd->last_pulse_ts = event->ts;
/* do type individual pattern matching */
for (i = 0; i < dpd->num_radar_types; i++) {
struct pri_detector *pd = cd->detectors[i];
struct pri_sequence *ps = pd->add_pulse(pd, event);
if (ps != NULL) {
if (rs != NULL)
memcpy(rs, pd->rs, sizeof(*rs));
ath_dbg(dpd->common, DFS,
"DFS: radar found on freq=%d: id=%d, pri=%d, "
"count=%d, count_false=%d\n",
event->freq, pd->rs->type_id,
ps->pri, ps->count, ps->count_falses);
pd->reset(pd, dpd->last_pulse_ts);
return true;
}
}
return false;
}
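/*
 * A minimal usage sketch, assuming a driver that receives radar pulse
 * reports from its PHY error path; the helper name and field values are
 * hypothetical, kept under #if 0 since it is illustrative only.
 */
#if 0
static void example_report_pulse(struct dfs_pattern_detector *dpd,
				 u64 tsf, u16 freq, u8 width, u8 rssi)
{
	/* hypothetical example: translate one hardware report */
	struct pulse_event pe = {
		.ts = tsf,	/* microsecond timestamp (TSF) */
		.freq = freq,	/* current channel in MHz */
		.width = width,	/* pulse width in usecs */
		.rssi = rssi,
	};

	/* feeds every per-type PRI detector for this channel */
	if (dpd->add_pulse(dpd, &pe, NULL))
		ath_dbg(dpd->common, DFS, "radar detected on %d MHz\n", freq);
}
#endif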
static struct ath_dfs_pool_stats
dpd_get_stats(struct dfs_pattern_detector *dpd)
{
return global_dfs_pool_stats;
}
static bool dpd_set_domain(struct dfs_pattern_detector *dpd,
enum nl80211_dfs_regions region)
{
const struct radar_types *rt;
struct channel_detector *cd, *cd0;
if (dpd->region == region)
return true;
dpd->region = NL80211_DFS_UNSET;
rt = get_dfs_domain_radar_types(region);
if (rt == NULL)
return false;
/* delete all channel detectors for previous DFS domain */
list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head)
channel_detector_exit(dpd, cd);
dpd->radar_spec = rt->radar_types;
dpd->num_radar_types = rt->num_radar_types;
dpd->region = region;
return true;
}
static const struct dfs_pattern_detector default_dpd = {
.exit = dpd_exit,
.set_dfs_domain = dpd_set_domain,
.add_pulse = dpd_add_pulse,
.get_stats = dpd_get_stats,
.region = NL80211_DFS_UNSET,
};
struct dfs_pattern_detector *
dfs_pattern_detector_init(struct ath_common *common,
enum nl80211_dfs_regions region)
{
struct dfs_pattern_detector *dpd;
if (!IS_ENABLED(CONFIG_CFG80211_CERTIFICATION_ONUS))
return NULL;
dpd = kmalloc(sizeof(*dpd), GFP_KERNEL);
if (dpd == NULL)
return NULL;
*dpd = default_dpd;
INIT_LIST_HEAD(&dpd->channel_detectors);
dpd->common = common;
if (dpd->set_dfs_domain(dpd, region))
return dpd;
	ath_dbg(common, DFS, "Could not set DFS domain to %d", region);
kfree(dpd);
return NULL;
}
EXPORT_SYMBOL(dfs_pattern_detector_init);
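/*
 * A minimal lifecycle sketch, assuming CONFIG_CFG80211_CERTIFICATION_ONUS
 * is enabled; error handling is abbreviated and the function name is
 * hypothetical (#if 0, illustrative only).
 */
#if 0
static int example_dfs_lifecycle(struct ath_common *common)
{
	struct dfs_pattern_detector *dpd;

	dpd = dfs_pattern_detector_init(common, NL80211_DFS_FCC);
	if (!dpd)
		return -ENOMEM;

	/* ... feed struct pulse_event samples through dpd->add_pulse() ... */

	/* frees all per-channel detectors and the dpd itself */
	dpd->exit(dpd);
	return 0;
}
#endif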
|
linux-master
|
drivers/net/wireless/ath/dfs_pattern_detector.c
|
/*
* Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/export.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include "regd.h"
#include "regd_common.h"
static int __ath_regd_init(struct ath_regulatory *reg);
/*
* This is a set of common rules used by our world regulatory domains.
* We have 12 world regulatory domains. To save space we consolidate
* the regulatory domains in 5 structures by frequency and change
* the flags on our reg_notifier() on a case by case basis.
*/
/* Only these channels allow active scan on all world regulatory domains */
#define ATH_2GHZ_CH01_11 REG_RULE(2412-10, 2462+10, 40, 0, 20, 0)
/* We enable active scan on these on a case by case basis by regulatory domain */
#define ATH_2GHZ_CH12_13 REG_RULE(2467-10, 2472+10, 40, 0, 20,\
NL80211_RRF_NO_IR)
#define ATH_2GHZ_CH14 REG_RULE(2484-10, 2484+10, 40, 0, 20,\
NL80211_RRF_NO_IR | \
NL80211_RRF_NO_OFDM)
/* We allow IBSS on these on a case by case basis by regulatory domain */
#define ATH_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 80, 0, 30,\
NL80211_RRF_NO_IR)
#define ATH_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 80, 0, 30,\
NL80211_RRF_NO_IR)
#define ATH_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 80, 0, 30,\
NL80211_RRF_NO_IR)
#define ATH_2GHZ_ALL ATH_2GHZ_CH01_11, \
ATH_2GHZ_CH12_13, \
ATH_2GHZ_CH14
#define ATH_5GHZ_ALL ATH_5GHZ_5150_5350, \
ATH_5GHZ_5470_5850
/* This one skips what we call "mid band" */
#define ATH_5GHZ_NO_MIDBAND ATH_5GHZ_5150_5350, \
ATH_5GHZ_5725_5850
/* Can be used for:
* 0x60, 0x61, 0x62 */
static const struct ieee80211_regdomain ath_world_regdom_60_61_62 = {
.n_reg_rules = 5,
.alpha2 = "99",
.reg_rules = {
ATH_2GHZ_ALL,
ATH_5GHZ_ALL,
}
};
/* Can be used by 0x63 and 0x65 */
static const struct ieee80211_regdomain ath_world_regdom_63_65 = {
.n_reg_rules = 4,
.alpha2 = "99",
.reg_rules = {
ATH_2GHZ_CH01_11,
ATH_2GHZ_CH12_13,
ATH_5GHZ_NO_MIDBAND,
}
};
/* Can be used by 0x64 only */
static const struct ieee80211_regdomain ath_world_regdom_64 = {
.n_reg_rules = 3,
.alpha2 = "99",
.reg_rules = {
ATH_2GHZ_CH01_11,
ATH_5GHZ_NO_MIDBAND,
}
};
/* Can be used by 0x66 and 0x69 */
static const struct ieee80211_regdomain ath_world_regdom_66_69 = {
.n_reg_rules = 3,
.alpha2 = "99",
.reg_rules = {
ATH_2GHZ_CH01_11,
ATH_5GHZ_ALL,
}
};
/* Can be used by 0x67, 0x68, 0x6A and 0x6C */
static const struct ieee80211_regdomain ath_world_regdom_67_68_6A_6C = {
.n_reg_rules = 4,
.alpha2 = "99",
.reg_rules = {
ATH_2GHZ_CH01_11,
ATH_2GHZ_CH12_13,
ATH_5GHZ_ALL,
}
};
static bool dynamic_country_user_possible(struct ath_regulatory *reg)
{
if (IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING))
return true;
switch (reg->country_code) {
case CTRY_UNITED_STATES:
case CTRY_JAPAN1:
case CTRY_JAPAN2:
case CTRY_JAPAN3:
case CTRY_JAPAN4:
case CTRY_JAPAN5:
case CTRY_JAPAN6:
case CTRY_JAPAN7:
case CTRY_JAPAN8:
case CTRY_JAPAN9:
case CTRY_JAPAN10:
case CTRY_JAPAN11:
case CTRY_JAPAN12:
case CTRY_JAPAN13:
case CTRY_JAPAN14:
case CTRY_JAPAN15:
case CTRY_JAPAN16:
case CTRY_JAPAN17:
case CTRY_JAPAN18:
case CTRY_JAPAN19:
case CTRY_JAPAN20:
case CTRY_JAPAN21:
case CTRY_JAPAN22:
case CTRY_JAPAN23:
case CTRY_JAPAN24:
case CTRY_JAPAN25:
case CTRY_JAPAN26:
case CTRY_JAPAN27:
case CTRY_JAPAN28:
case CTRY_JAPAN29:
case CTRY_JAPAN30:
case CTRY_JAPAN31:
case CTRY_JAPAN32:
case CTRY_JAPAN33:
case CTRY_JAPAN34:
case CTRY_JAPAN35:
case CTRY_JAPAN36:
case CTRY_JAPAN37:
case CTRY_JAPAN38:
case CTRY_JAPAN39:
case CTRY_JAPAN40:
case CTRY_JAPAN41:
case CTRY_JAPAN42:
case CTRY_JAPAN43:
case CTRY_JAPAN44:
case CTRY_JAPAN45:
case CTRY_JAPAN46:
case CTRY_JAPAN47:
case CTRY_JAPAN48:
case CTRY_JAPAN49:
case CTRY_JAPAN50:
case CTRY_JAPAN51:
case CTRY_JAPAN52:
case CTRY_JAPAN53:
case CTRY_JAPAN54:
case CTRY_JAPAN55:
case CTRY_JAPAN56:
case CTRY_JAPAN57:
case CTRY_JAPAN58:
case CTRY_JAPAN59:
return false;
}
return true;
}
static bool ath_reg_dyn_country_user_allow(struct ath_regulatory *reg)
{
if (!IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS))
return false;
if (!dynamic_country_user_possible(reg))
return false;
return true;
}
static inline bool is_wwr_sku(u16 regd)
{
return ((regd & COUNTRY_ERD_FLAG) != COUNTRY_ERD_FLAG) &&
(((regd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX) ||
(regd == WORLD));
}
static u16 ath_regd_get_eepromRD(struct ath_regulatory *reg)
{
return reg->current_rd & ~WORLDWIDE_ROAMING_FLAG;
}
bool ath_is_world_regd(struct ath_regulatory *reg)
{
return is_wwr_sku(ath_regd_get_eepromRD(reg));
}
EXPORT_SYMBOL(ath_is_world_regd);
static const struct ieee80211_regdomain *ath_default_world_regdomain(void)
{
/* this is the most restrictive */
return &ath_world_regdom_64;
}
static const struct
ieee80211_regdomain *ath_world_regdomain(struct ath_regulatory *reg)
{
switch (reg->regpair->reg_domain) {
case 0x60:
case 0x61:
case 0x62:
return &ath_world_regdom_60_61_62;
case 0x63:
case 0x65:
return &ath_world_regdom_63_65;
case 0x64:
return &ath_world_regdom_64;
case 0x66:
case 0x69:
return &ath_world_regdom_66_69;
case 0x67:
case 0x68:
case 0x6A:
case 0x6C:
return &ath_world_regdom_67_68_6A_6C;
default:
WARN_ON(1);
return ath_default_world_regdomain();
}
}
bool ath_is_49ghz_allowed(u16 regdomain)
{
/* possibly more */
return regdomain == MKK9_MKKC;
}
EXPORT_SYMBOL(ath_is_49ghz_allowed);
/* Frequency is one where radar detection is required */
static bool ath_is_radar_freq(u16 center_freq,
struct ath_regulatory *reg)
{
if (reg->country_code == CTRY_INDIA)
return (center_freq >= 5500 && center_freq <= 5700);
return (center_freq >= 5260 && center_freq <= 5700);
}
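/*
 * Worked example (editorial, values taken from the ranges above):
 * channel 100 (5500 MHz) requires radar detection in every country;
 * channel 52 (5260 MHz) requires it everywhere except CTRY_INDIA, whose
 * DFS range only starts at 5500 MHz; channel 36 (5180 MHz) never does.
 */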
static void ath_force_clear_no_ir_chan(struct wiphy *wiphy,
struct ieee80211_channel *ch)
{
const struct ieee80211_reg_rule *reg_rule;
reg_rule = freq_reg_info(wiphy, MHZ_TO_KHZ(ch->center_freq));
if (IS_ERR(reg_rule))
return;
if (!(reg_rule->flags & NL80211_RRF_NO_IR))
if (ch->flags & IEEE80211_CHAN_NO_IR)
ch->flags &= ~IEEE80211_CHAN_NO_IR;
}
static void ath_force_clear_no_ir_freq(struct wiphy *wiphy, u16 center_freq)
{
struct ieee80211_channel *ch;
ch = ieee80211_get_channel(wiphy, center_freq);
if (!ch)
return;
ath_force_clear_no_ir_chan(wiphy, ch);
}
static void ath_force_no_ir_chan(struct ieee80211_channel *ch)
{
ch->flags |= IEEE80211_CHAN_NO_IR;
}
static void ath_force_no_ir_freq(struct wiphy *wiphy, u16 center_freq)
{
struct ieee80211_channel *ch;
ch = ieee80211_get_channel(wiphy, center_freq);
if (!ch)
return;
ath_force_no_ir_chan(ch);
}
static void
__ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
struct ath_regulatory *reg,
enum nl80211_reg_initiator initiator,
struct ieee80211_channel *ch)
{
if (ath_is_radar_freq(ch->center_freq, reg) ||
(ch->flags & IEEE80211_CHAN_RADAR))
return;
switch (initiator) {
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
ath_force_clear_no_ir_chan(wiphy, ch);
break;
case NL80211_REGDOM_SET_BY_USER:
if (ath_reg_dyn_country_user_allow(reg))
ath_force_clear_no_ir_chan(wiphy, ch);
break;
default:
if (ch->beacon_found)
ch->flags &= ~IEEE80211_CHAN_NO_IR;
}
}
/*
 * These exception rules do not apply to radar frequencies.
 *
 * - We enable initiating radiation if the country IE says it's fine.
 * - If no country IE has been processed and we determine we have
 *   received a beacon on a channel, we can enable initiating radiation.
 */
static void
ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
struct ath_regulatory *reg,
enum nl80211_reg_initiator initiator)
{
enum nl80211_band band;
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
unsigned int i;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!wiphy->bands[band])
continue;
sband = wiphy->bands[band];
for (i = 0; i < sband->n_channels; i++) {
ch = &sband->channels[i];
__ath_reg_apply_beaconing_flags(wiphy, reg,
initiator, ch);
}
}
}
/**
* ath_reg_apply_ir_flags()
* @wiphy: the wiphy to use
* @reg: regulatory structure - used for country selection
* @initiator: the regulatory hint initiator
*
* If no country IE has been received always enable passive scan
* and no-ibss on these channels. This is only done for specific
* regulatory SKUs.
*
* If a country IE has been received check its rule for this
* channel first before enabling active scan. The passive scan
* would have been enforced by the initial processing of our
* custom regulatory domain.
*/
static void
ath_reg_apply_ir_flags(struct wiphy *wiphy,
struct ath_regulatory *reg,
enum nl80211_reg_initiator initiator)
{
struct ieee80211_supported_band *sband;
sband = wiphy->bands[NL80211_BAND_2GHZ];
if (!sband)
return;
	switch (initiator) {
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
ath_force_clear_no_ir_freq(wiphy, 2467);
ath_force_clear_no_ir_freq(wiphy, 2472);
break;
case NL80211_REGDOM_SET_BY_USER:
if (!ath_reg_dyn_country_user_allow(reg))
break;
ath_force_clear_no_ir_freq(wiphy, 2467);
ath_force_clear_no_ir_freq(wiphy, 2472);
break;
default:
ath_force_no_ir_freq(wiphy, 2467);
ath_force_no_ir_freq(wiphy, 2472);
}
}
/* Always apply Radar/DFS rules on freq range 5500 MHz - 5700 MHz */
static void ath_reg_apply_radar_flags(struct wiphy *wiphy,
struct ath_regulatory *reg)
{
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
unsigned int i;
if (!wiphy->bands[NL80211_BAND_5GHZ])
return;
sband = wiphy->bands[NL80211_BAND_5GHZ];
for (i = 0; i < sband->n_channels; i++) {
ch = &sband->channels[i];
if (!ath_is_radar_freq(ch->center_freq, reg))
continue;
		/* We always enable radar detection/DFS on this
		 * frequency range. Additionally we also apply on
		 * this frequency range:
		 * - If STA mode does not yet support DFS, disable
		 *   active scanning
		 * - If adhoc mode does not support DFS yet, disable
		 *   adhoc on this frequency
		 * - If AP mode does not yet support radar detection/DFS,
		 *   do not allow AP mode
		 */
if (!(ch->flags & IEEE80211_CHAN_DISABLED))
ch->flags |= IEEE80211_CHAN_RADAR |
IEEE80211_CHAN_NO_IR;
}
}
static void ath_reg_apply_world_flags(struct wiphy *wiphy,
enum nl80211_reg_initiator initiator,
struct ath_regulatory *reg)
{
switch (reg->regpair->reg_domain) {
case 0x60:
case 0x63:
case 0x66:
case 0x67:
case 0x6C:
ath_reg_apply_beaconing_flags(wiphy, reg, initiator);
break;
case 0x68:
ath_reg_apply_beaconing_flags(wiphy, reg, initiator);
ath_reg_apply_ir_flags(wiphy, reg, initiator);
break;
default:
if (ath_reg_dyn_country_user_allow(reg))
ath_reg_apply_beaconing_flags(wiphy, reg, initiator);
}
}
u16 ath_regd_find_country_by_name(char *alpha2)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
if (!memcmp(allCountries[i].isoName, alpha2, 2))
return allCountries[i].countryCode;
}
return -1;
}
EXPORT_SYMBOL(ath_regd_find_country_by_name);
static int __ath_reg_dyn_country(struct wiphy *wiphy,
struct ath_regulatory *reg,
struct regulatory_request *request)
{
u16 country_code;
if (request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
!ath_is_world_regd(reg))
return -EINVAL;
country_code = ath_regd_find_country_by_name(request->alpha2);
if (country_code == (u16) -1)
return -EINVAL;
reg->current_rd = COUNTRY_ERD_FLAG;
reg->current_rd |= country_code;
__ath_regd_init(reg);
ath_reg_apply_world_flags(wiphy, request->initiator, reg);
return 0;
}
static void ath_reg_dyn_country(struct wiphy *wiphy,
struct ath_regulatory *reg,
struct regulatory_request *request)
{
if (__ath_reg_dyn_country(wiphy, reg, request))
return;
printk(KERN_DEBUG "ath: regdomain 0x%0x "
"dynamically updated by %s\n",
reg->current_rd,
reg_initiator_name(request->initiator));
}
void ath_reg_notifier_apply(struct wiphy *wiphy,
struct regulatory_request *request,
struct ath_regulatory *reg)
{
struct ath_common *common = container_of(reg, struct ath_common,
regulatory);
/* We always apply this */
ath_reg_apply_radar_flags(wiphy, reg);
/*
	 * This would happen when we have sent a custom regulatory request
	 * for a world regulatory domain and the scheduler hasn't yet
	 * processed any pending requests in the queue.
*/
if (!request)
return;
reg->region = request->dfs_region;
switch (request->initiator) {
case NL80211_REGDOM_SET_BY_CORE:
/*
* If common->reg_world_copy is world roaming it means we *were*
* world roaming... so we now have to restore that data.
*/
if (!ath_is_world_regd(&common->reg_world_copy))
break;
memcpy(reg, &common->reg_world_copy,
sizeof(struct ath_regulatory));
break;
case NL80211_REGDOM_SET_BY_DRIVER:
break;
case NL80211_REGDOM_SET_BY_USER:
if (ath_reg_dyn_country_user_allow(reg))
ath_reg_dyn_country(wiphy, reg, request);
break;
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
ath_reg_dyn_country(wiphy, reg, request);
break;
}
}
EXPORT_SYMBOL(ath_reg_notifier_apply);
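/*
 * A minimal wiring sketch, assuming a mac80211 driver; the
 * example_hw_to_ath_common() lookup is hypothetical, the rest mirrors
 * how callers typically hook this notifier (#if 0, illustrative only).
 */
#if 0
static void example_reg_notifier(struct wiphy *wiphy,
				 struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath_common *common = example_hw_to_ath_common(hw); /* hypothetical */

	ath_reg_notifier_apply(wiphy, request, &common->regulatory);
}
#endif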
static bool ath_regd_is_eeprom_valid(struct ath_regulatory *reg)
{
u16 rd = ath_regd_get_eepromRD(reg);
int i;
if (rd & COUNTRY_ERD_FLAG) {
/* EEPROM value is a country code */
u16 cc = rd & ~COUNTRY_ERD_FLAG;
printk(KERN_DEBUG
"ath: EEPROM indicates we should expect "
"a country code\n");
for (i = 0; i < ARRAY_SIZE(allCountries); i++)
if (allCountries[i].countryCode == cc)
return true;
} else {
/* EEPROM value is a regpair value */
if (rd != CTRY_DEFAULT)
printk(KERN_DEBUG "ath: EEPROM indicates we "
"should expect a direct regpair map\n");
for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++)
if (regDomainPairs[i].reg_domain == rd)
return true;
}
printk(KERN_DEBUG
"ath: invalid regulatory domain/country code 0x%x\n", rd);
return false;
}
/* EEPROM country code to regpair mapping */
static struct country_code_to_enum_rd*
ath_regd_find_country(u16 countryCode)
{
int i;
for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
if (allCountries[i].countryCode == countryCode)
return &allCountries[i];
}
return NULL;
}
/* EEPROM rd code to regpair mapping */
static struct country_code_to_enum_rd*
ath_regd_find_country_by_rd(int regdmn)
{
int i;
for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
if (allCountries[i].regDmnEnum == regdmn)
return &allCountries[i];
}
return NULL;
}
/* Returns the map of the EEPROM set RD to a country code */
static u16 ath_regd_get_default_country(u16 rd)
{
if (rd & COUNTRY_ERD_FLAG) {
struct country_code_to_enum_rd *country = NULL;
u16 cc = rd & ~COUNTRY_ERD_FLAG;
country = ath_regd_find_country(cc);
if (country != NULL)
return cc;
}
return CTRY_DEFAULT;
}
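/*
 * Worked example (editorial): an EEPROM value of 0x8348 has
 * COUNTRY_ERD_FLAG (0x8000) set, so the country code is
 * 0x8348 & ~0x8000 = 0x348 = 840, i.e. CTRY_UNITED_STATES. Without the
 * flag, the value is treated as a direct regpair ID instead.
 */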
static struct reg_dmn_pair_mapping*
ath_get_regpair(int regdmn)
{
int i;
if (regdmn == NO_ENUMRD)
return NULL;
for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) {
if (regDomainPairs[i].reg_domain == regdmn)
		return &regDomainPairs[i];
}
return NULL;
}
static int
ath_regd_init_wiphy(struct ath_regulatory *reg,
struct wiphy *wiphy,
void (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request))
{
const struct ieee80211_regdomain *regd;
wiphy->reg_notifier = reg_notifier;
wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
REGULATORY_CUSTOM_REG;
if (ath_is_world_regd(reg)) {
/*
* Anything applied here (prior to wiphy registration) gets
* saved on the wiphy orig_* parameters
*/
regd = ath_world_regdomain(reg);
wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_FOLLOW_POWER;
} else {
/*
* This gets applied in the case of the absence of CRDA,
* it's our own custom world regulatory domain, similar to
* cfg80211's but we enable passive scanning.
*/
regd = ath_default_world_regdomain();
}
wiphy_apply_custom_regulatory(wiphy, regd);
ath_reg_apply_radar_flags(wiphy, reg);
ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
return 0;
}
/*
* Some users have reported their EEPROM programmed with
* 0x8000 set, this is not a supported regulatory domain
* but since we have more than one user with it we need
* a solution for them. We default to 0x64, which is the
* default Atheros world regulatory domain.
*/
static void ath_regd_sanitize(struct ath_regulatory *reg)
{
if (reg->current_rd != COUNTRY_ERD_FLAG)
return;
printk(KERN_DEBUG "ath: EEPROM regdomain sanitized\n");
reg->current_rd = 0x64;
}
static int __ath_regd_init(struct ath_regulatory *reg)
{
struct country_code_to_enum_rd *country = NULL;
u16 regdmn;
if (!reg)
return -EINVAL;
ath_regd_sanitize(reg);
printk(KERN_DEBUG "ath: EEPROM regdomain: 0x%0x\n", reg->current_rd);
if (!ath_regd_is_eeprom_valid(reg)) {
pr_err("Invalid EEPROM contents\n");
return -EINVAL;
}
regdmn = ath_regd_get_eepromRD(reg);
reg->country_code = ath_regd_get_default_country(regdmn);
if (reg->country_code == CTRY_DEFAULT &&
regdmn == CTRY_DEFAULT) {
printk(KERN_DEBUG "ath: EEPROM indicates default "
"country code should be used\n");
reg->country_code = CTRY_UNITED_STATES;
}
if (reg->country_code == CTRY_DEFAULT) {
country = NULL;
} else {
printk(KERN_DEBUG "ath: doing EEPROM country->regdmn "
"map search\n");
country = ath_regd_find_country(reg->country_code);
if (country == NULL) {
printk(KERN_DEBUG
"ath: no valid country maps found for "
"country code: 0x%0x\n",
reg->country_code);
return -EINVAL;
} else {
regdmn = country->regDmnEnum;
printk(KERN_DEBUG "ath: country maps to "
"regdmn code: 0x%0x\n",
regdmn);
}
}
reg->regpair = ath_get_regpair(regdmn);
if (!reg->regpair) {
printk(KERN_DEBUG "ath: "
"No regulatory domain pair found, cannot continue\n");
return -EINVAL;
}
if (!country)
country = ath_regd_find_country_by_rd(regdmn);
if (country) {
reg->alpha2[0] = country->isoName[0];
reg->alpha2[1] = country->isoName[1];
} else {
reg->alpha2[0] = '0';
reg->alpha2[1] = '0';
}
printk(KERN_DEBUG "ath: Country alpha2 being used: %c%c\n",
reg->alpha2[0], reg->alpha2[1]);
printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n",
reg->regpair->reg_domain);
return 0;
}
int
ath_regd_init(struct ath_regulatory *reg,
struct wiphy *wiphy,
void (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request))
{
struct ath_common *common = container_of(reg, struct ath_common,
regulatory);
int r;
r = __ath_regd_init(reg);
if (r)
return r;
if (ath_is_world_regd(reg))
memcpy(&common->reg_world_copy, reg,
sizeof(struct ath_regulatory));
ath_regd_init_wiphy(reg, wiphy, reg_notifier);
return 0;
}
EXPORT_SYMBOL(ath_regd_init);
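/*
 * A minimal init sketch, assuming the EEPROM regdomain has been read
 * into current_rd before wiphy registration; example_read_eeprom_rd()
 * and example_reg_notifier() are hypothetical (#if 0, illustrative only).
 */
#if 0
static int example_init_regd(struct ath_common *common, struct wiphy *wiphy)
{
	/* hypothetical helper: fetch the raw regdomain word from EEPROM */
	common->regulatory.current_rd = example_read_eeprom_rd(common);

	return ath_regd_init(&common->regulatory, wiphy,
			     example_reg_notifier);
}
#endif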
u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
enum nl80211_band band)
{
if (!reg->regpair ||
(reg->country_code == CTRY_DEFAULT &&
is_wwr_sku(ath_regd_get_eepromRD(reg)))) {
return SD_NO_CTL;
}
if (ath_regd_get_eepromRD(reg) == CTRY_DEFAULT) {
switch (reg->region) {
case NL80211_DFS_FCC:
return CTL_FCC;
case NL80211_DFS_ETSI:
return CTL_ETSI;
case NL80211_DFS_JP:
return CTL_MKK;
default:
break;
}
}
switch (band) {
case NL80211_BAND_2GHZ:
return reg->regpair->reg_2ghz_ctl;
case NL80211_BAND_5GHZ:
return reg->regpair->reg_5ghz_ctl;
default:
return NO_CTL;
}
}
EXPORT_SYMBOL(ath_regd_get_band_ctl);
|
linux-master
|
drivers/net/wireless/ath/regd.c
|
/*
* Copyright (c) 2009 Atheros Communications Inc.
* Copyright (c) 2010 Bruno Randolf <[email protected]>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/mac80211.h>
#include "ath.h"
#include "reg.h"
#define REG_READ (common->ops->read)
#define REG_WRITE(_ah, _reg, _val) (common->ops->write)(_ah, _val, _reg)
#define ENABLE_REGWRITE_BUFFER(_ah) \
if (common->ops->enable_write_buffer) \
common->ops->enable_write_buffer((_ah));
#define REGWRITE_BUFFER_FLUSH(_ah) \
if (common->ops->write_flush) \
common->ops->write_flush((_ah));
#define IEEE80211_WEP_NKID 4 /* number of key ids */
/************************/
/* Key Cache Management */
/************************/
bool ath_hw_keyreset(struct ath_common *common, u16 entry)
{
u32 keyType;
void *ah = common->ah;
if (entry >= common->keymax) {
ath_err(common, "keyreset: keycache entry %u out of range\n",
entry);
return false;
}
keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry));
ENABLE_REGWRITE_BUFFER(ah);
REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0);
REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0);
REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0);
REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), 0);
REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), 0);
REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), AR_KEYTABLE_TYPE_CLR);
REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), 0);
REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), 0);
if (keyType == AR_KEYTABLE_TYPE_TKIP) {
u16 micentry = entry + 64;
REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), 0);
REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) {
REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
AR_KEYTABLE_TYPE_CLR);
}
}
REGWRITE_BUFFER_FLUSH(ah);
return true;
}
EXPORT_SYMBOL(ath_hw_keyreset);
bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac)
{
u32 macHi, macLo;
u32 unicast_flag = AR_KEYTABLE_VALID;
void *ah = common->ah;
if (entry >= common->keymax) {
ath_err(common, "keysetmac: keycache entry %u out of range\n",
entry);
return false;
}
if (mac != NULL) {
/*
* AR_KEYTABLE_VALID indicates that the address is a unicast
* address, which must match the transmitter address for
* decrypting frames.
* Not setting this bit allows the hardware to use the key
* for multicast frame decryption.
*/
if (is_multicast_ether_addr(mac))
unicast_flag = 0;
macLo = get_unaligned_le32(mac);
macHi = get_unaligned_le16(mac + 4);
macLo >>= 1;
macLo |= (macHi & 1) << 31;
macHi >>= 1;
} else {
macLo = macHi = 0;
}
ENABLE_REGWRITE_BUFFER(ah);
REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo);
REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | unicast_flag);
REGWRITE_BUFFER_FLUSH(ah);
return true;
}
EXPORT_SYMBOL(ath_hw_keysetmac);
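/*
 * Worked example (editorial): the hardware stores the address shifted
 * right by one bit, since a valid transmitter address is never multicast
 * (bit 0 of the first octet is clear). For 00:03:7f:12:34:56,
 * get_unaligned_le32() gives macLo = 0x127f0300 and get_unaligned_le16()
 * gives macHi = 0x5634; after the shift-and-carry above, macLo becomes
 * 0x093f8180 and macHi becomes 0x2b1a (macHi bit 0 was clear here, so
 * nothing is carried into macLo bit 31).
 */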
static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
const struct ath_keyval *k,
const u8 *mac)
{
void *ah = common->ah;
u32 key0, key1, key2, key3, key4;
u32 keyType;
if (entry >= common->keymax) {
ath_err(common, "set-entry: keycache entry %u out of range\n",
entry);
return false;
}
switch (k->kv_type) {
case ATH_CIPHER_AES_OCB:
keyType = AR_KEYTABLE_TYPE_AES;
break;
case ATH_CIPHER_AES_CCM:
if (!(common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)) {
ath_dbg(common, ANY,
"AES-CCM not supported by this mac rev\n");
return false;
}
keyType = AR_KEYTABLE_TYPE_CCM;
break;
case ATH_CIPHER_TKIP:
keyType = AR_KEYTABLE_TYPE_TKIP;
if (entry + 64 >= common->keymax) {
ath_dbg(common, ANY,
"entry %u inappropriate for TKIP\n", entry);
return false;
}
break;
case ATH_CIPHER_WEP:
if (k->kv_len < WLAN_KEY_LEN_WEP40) {
ath_dbg(common, ANY, "WEP key length %u too small\n",
k->kv_len);
return false;
}
if (k->kv_len <= WLAN_KEY_LEN_WEP40)
keyType = AR_KEYTABLE_TYPE_40;
else if (k->kv_len <= WLAN_KEY_LEN_WEP104)
keyType = AR_KEYTABLE_TYPE_104;
else
keyType = AR_KEYTABLE_TYPE_128;
break;
case ATH_CIPHER_CLR:
keyType = AR_KEYTABLE_TYPE_CLR;
break;
default:
ath_err(common, "cipher %u not supported\n", k->kv_type);
return false;
}
key0 = get_unaligned_le32(k->kv_val + 0);
key1 = get_unaligned_le16(k->kv_val + 4);
key2 = get_unaligned_le32(k->kv_val + 6);
key3 = get_unaligned_le16(k->kv_val + 10);
key4 = get_unaligned_le32(k->kv_val + 12);
if (k->kv_len <= WLAN_KEY_LEN_WEP104)
key4 &= 0xff;
/*
* Note: Key cache registers access special memory area that requires
* two 32-bit writes to actually update the values in the internal
* memory. Consequently, the exact order and pairs used here must be
* maintained.
*/
if (keyType == AR_KEYTABLE_TYPE_TKIP) {
u16 micentry = entry + 64;
/*
* Write inverted key[47:0] first to avoid Michael MIC errors
* on frames that could be sent or received at the same time.
* The correct key will be written in the end once everything
* else is ready.
*/
REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), ~key0);
REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), ~key1);
/* Write key[95:48] */
REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
/* Write key[127:96] and key type */
REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
/* Write MAC address for the entry */
(void) ath_hw_keysetmac(common, entry, mac);
if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) {
/*
* TKIP uses two key cache entries:
* Michael MIC TX/RX keys in the same key cache entry
* (idx = main index + 64):
* key0 [31:0] = RX key [31:0]
* key1 [15:0] = TX key [31:16]
* key1 [31:16] = reserved
* key2 [31:0] = RX key [63:32]
* key3 [15:0] = TX key [15:0]
* key3 [31:16] = reserved
* key4 [31:0] = TX key [63:32]
*/
u32 mic0, mic1, mic2, mic3, mic4;
mic0 = get_unaligned_le32(k->kv_mic + 0);
mic2 = get_unaligned_le32(k->kv_mic + 4);
mic1 = get_unaligned_le16(k->kv_txmic + 2) & 0xffff;
mic3 = get_unaligned_le16(k->kv_txmic + 0) & 0xffff;
mic4 = get_unaligned_le32(k->kv_txmic + 4);
ENABLE_REGWRITE_BUFFER(ah);
/* Write RX[31:0] and TX[31:16] */
REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), mic1);
/* Write RX[63:32] and TX[15:0] */
REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), mic3);
/* Write TX[63:32] and keyType(reserved) */
REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), mic4);
REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
AR_KEYTABLE_TYPE_CLR);
REGWRITE_BUFFER_FLUSH(ah);
} else {
/*
* TKIP uses four key cache entries (two for group
* keys):
* Michael MIC TX/RX keys are in different key cache
* entries (idx = main index + 64 for TX and
			 * main index + 32 + 64 for RX):
* key0 [31:0] = TX/RX MIC key [31:0]
* key1 [31:0] = reserved
* key2 [31:0] = TX/RX MIC key [63:32]
* key3 [31:0] = reserved
* key4 [31:0] = reserved
*
* Upper layer code will call this function separately
			 * for TX and RX keys when these register offsets are
* used.
*/
u32 mic0, mic2;
mic0 = get_unaligned_le32(k->kv_mic + 0);
mic2 = get_unaligned_le32(k->kv_mic + 4);
ENABLE_REGWRITE_BUFFER(ah);
/* Write MIC key[31:0] */
REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
/* Write MIC key[63:32] */
REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
/* Write TX[63:32] and keyType(reserved) */
REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
AR_KEYTABLE_TYPE_CLR);
REGWRITE_BUFFER_FLUSH(ah);
}
ENABLE_REGWRITE_BUFFER(ah);
/* MAC address registers are reserved for the MIC entry */
REG_WRITE(ah, AR_KEYTABLE_MAC0(micentry), 0);
REG_WRITE(ah, AR_KEYTABLE_MAC1(micentry), 0);
/*
* Write the correct (un-inverted) key[47:0] last to enable
* TKIP now that all other registers are set with correct
* values.
*/
REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
REGWRITE_BUFFER_FLUSH(ah);
} else {
ENABLE_REGWRITE_BUFFER(ah);
/* Write key[47:0] */
REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
/* Write key[95:48] */
REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
/* Write key[127:96] and key type */
REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
REGWRITE_BUFFER_FLUSH(ah);
/* Write MAC address for the entry */
(void) ath_hw_keysetmac(common, entry, mac);
}
return true;
}
static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key,
struct ath_keyval *hk, const u8 *addr,
bool authenticator)
{
const u8 *key_rxmic;
const u8 *key_txmic;
key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
if (addr == NULL) {
/*
* Group key installation - only two key cache entries are used
* regardless of splitmic capability since group key is only
* used either for TX or RX.
*/
if (authenticator) {
memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic));
} else {
memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
}
return ath_hw_set_keycache_entry(common, keyix, hk, addr);
}
if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) {
/* TX and RX keys share the same key cache entry. */
memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
return ath_hw_set_keycache_entry(common, keyix, hk, addr);
}
/* Separate key cache entries for TX and RX */
/* TX key goes at first index, RX key at +32. */
memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
if (!ath_hw_set_keycache_entry(common, keyix, hk, NULL)) {
/* TX MIC entry failed. No need to proceed further */
ath_err(common, "Setting TX MIC Key Failed\n");
return 0;
}
memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
/* XXX delete tx key on failure? */
return ath_hw_set_keycache_entry(common, keyix + 32, hk, addr);
}
static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
{
int i;
for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
if (test_bit(i, common->keymap) ||
test_bit(i + 64, common->keymap))
continue; /* At least one part of TKIP key allocated */
if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) &&
(test_bit(i + 32, common->keymap) ||
test_bit(i + 64 + 32, common->keymap)))
continue; /* At least one part of TKIP key allocated */
/* Found a free slot for a TKIP key */
return i;
}
return -1;
}
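/*
 * Editorial note on the slot layout assumed by the checks above: without
 * ATH_CRYPT_CAP_MIC_COMBINED a TKIP key at index i spans four entries,
 *
 *   i        TX key
 *   i + 32   RX key
 *   i + 64   TX MIC
 *   i + 96   RX MIC
 *
 * so a slot is only free for TKIP when all four keymap bits are clear.
 */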
static int ath_reserve_key_cache_slot(struct ath_common *common,
u32 cipher)
{
int i;
if (cipher == WLAN_CIPHER_SUITE_TKIP)
return ath_reserve_key_cache_slot_tkip(common);
/* First, try to find slots that would not be available for TKIP. */
if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
if (!test_bit(i, common->keymap) &&
(test_bit(i + 32, common->keymap) ||
test_bit(i + 64, common->keymap) ||
test_bit(i + 64 + 32, common->keymap)))
return i;
if (!test_bit(i + 32, common->keymap) &&
(test_bit(i, common->keymap) ||
test_bit(i + 64, common->keymap) ||
test_bit(i + 64 + 32, common->keymap)))
return i + 32;
if (!test_bit(i + 64, common->keymap) &&
			    (test_bit(i, common->keymap) ||
test_bit(i + 32, common->keymap) ||
test_bit(i + 64 + 32, common->keymap)))
return i + 64;
if (!test_bit(i + 64 + 32, common->keymap) &&
(test_bit(i, common->keymap) ||
test_bit(i + 32, common->keymap) ||
test_bit(i + 64, common->keymap)))
return i + 64 + 32;
}
} else {
for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
if (!test_bit(i, common->keymap) &&
test_bit(i + 64, common->keymap))
return i;
if (test_bit(i, common->keymap) &&
!test_bit(i + 64, common->keymap))
return i + 64;
}
}
/* No partially used TKIP slots, pick any available slot */
for (i = IEEE80211_WEP_NKID; i < common->keymax; i++) {
/* Do not allow slots that could be needed for TKIP group keys
* to be used. This limitation could be removed if we know that
* TKIP will not be used. */
if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
continue;
if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
continue;
if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
continue;
}
if (!test_bit(i, common->keymap))
return i; /* Found a free slot for a key */
}
/* No free slot found */
return -1;
}
/*
* Configure encryption in the HW.
*/
int ath_key_config(struct ath_common *common,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
struct ath_keyval hk;
const u8 *mac = NULL;
u8 gmac[ETH_ALEN];
int ret = 0;
int idx;
memset(&hk, 0, sizeof(hk));
switch (key->cipher) {
case 0:
hk.kv_type = ATH_CIPHER_CLR;
break;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
hk.kv_type = ATH_CIPHER_WEP;
break;
case WLAN_CIPHER_SUITE_TKIP:
hk.kv_type = ATH_CIPHER_TKIP;
break;
case WLAN_CIPHER_SUITE_CCMP:
hk.kv_type = ATH_CIPHER_AES_CCM;
break;
default:
return -EOPNOTSUPP;
}
hk.kv_len = key->keylen;
if (key->keylen)
memcpy(&hk.kv_values, key->key, key->keylen);
if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
switch (vif->type) {
case NL80211_IFTYPE_AP:
memcpy(gmac, vif->addr, ETH_ALEN);
gmac[0] |= 0x01;
mac = gmac;
idx = ath_reserve_key_cache_slot(common, key->cipher);
break;
case NL80211_IFTYPE_ADHOC:
if (!sta) {
idx = key->keyidx;
break;
}
memcpy(gmac, sta->addr, ETH_ALEN);
gmac[0] |= 0x01;
mac = gmac;
idx = ath_reserve_key_cache_slot(common, key->cipher);
break;
default:
idx = key->keyidx;
break;
}
} else if (key->keyidx) {
if (WARN_ON(!sta))
return -EOPNOTSUPP;
mac = sta->addr;
if (vif->type != NL80211_IFTYPE_AP) {
/* Only keyidx 0 should be used with unicast key, but
* allow this for client mode for now. */
idx = key->keyidx;
} else
return -EIO;
} else {
if (WARN_ON(!sta))
return -EOPNOTSUPP;
mac = sta->addr;
idx = ath_reserve_key_cache_slot(common, key->cipher);
}
if (idx < 0)
return -ENOSPC; /* no free key cache entries */
if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
vif->type == NL80211_IFTYPE_AP);
else
ret = ath_hw_set_keycache_entry(common, idx, &hk, mac);
if (!ret)
return -EIO;
set_bit(idx, common->keymap);
if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
set_bit(idx, common->ccmp_keymap);
if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
set_bit(idx + 64, common->keymap);
set_bit(idx, common->tkip_keymap);
set_bit(idx + 64, common->tkip_keymap);
if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
set_bit(idx + 32, common->keymap);
set_bit(idx + 64 + 32, common->keymap);
set_bit(idx + 32, common->tkip_keymap);
set_bit(idx + 64 + 32, common->tkip_keymap);
}
}
return idx;
}
EXPORT_SYMBOL(ath_key_config);
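/*
 * A minimal set_key sketch, assuming a mac80211 driver; the
 * example_hw_to_ath_common() lookup is hypothetical, while the
 * ath_key_config()/ath_key_delete() calls match this file
 * (#if 0, illustrative only).
 */
#if 0
static int example_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *key)
{
	struct ath_common *common = example_hw_to_ath_common(hw); /* hypothetical */
	int ret;

	if (cmd == SET_KEY) {
		ret = ath_key_config(common, vif, sta, key);
		if (ret < 0)
			return ret;
		/* the returned key cache slot becomes the hardware index */
		key->hw_key_idx = ret;
		return 0;
	}

	ath_key_delete(common, key->hw_key_idx);
	return 0;
}
#endif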
/*
* Delete Key.
*/
void ath_key_delete(struct ath_common *common, u8 hw_key_idx)
{
/* Leave CCMP and TKIP (main key) configured to avoid disabling
* encryption for potentially pending frames already in a TXQ with the
* keyix pointing to this key entry. Instead, only clear the MAC address
* to prevent RX processing from using this key cache entry.
*/
if (test_bit(hw_key_idx, common->ccmp_keymap) ||
test_bit(hw_key_idx, common->tkip_keymap))
ath_hw_keysetmac(common, hw_key_idx, NULL);
else
ath_hw_keyreset(common, hw_key_idx);
if (hw_key_idx < IEEE80211_WEP_NKID)
return;
clear_bit(hw_key_idx, common->keymap);
clear_bit(hw_key_idx, common->ccmp_keymap);
if (!test_bit(hw_key_idx, common->tkip_keymap))
return;
clear_bit(hw_key_idx + 64, common->keymap);
clear_bit(hw_key_idx, common->tkip_keymap);
clear_bit(hw_key_idx + 64, common->tkip_keymap);
if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
ath_hw_keyreset(common, hw_key_idx + 32);
clear_bit(hw_key_idx + 32, common->keymap);
clear_bit(hw_key_idx + 64 + 32, common->keymap);
clear_bit(hw_key_idx + 32, common->tkip_keymap);
clear_bit(hw_key_idx + 64 + 32, common->tkip_keymap);
}
}
EXPORT_SYMBOL(ath_key_delete);
|
linux-master
|
drivers/net/wireless/ath/key.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019 The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
EXPORT_SYMBOL(__tracepoint_ath11k_log_dbg);
|
linux-master
|
drivers/net/wireless/ath/ath11k/trace.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/elf.h>
#include "qmi.h"
#include "core.h"
#include "debug.h"
#include "hif.h"
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/ioport.h>
#include <linux/firmware.h>
#include <linux/of_irq.h>
#define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02
#define HOST_CSTATE_BIT 0x04
#define PLATFORM_CAP_PCIE_GLOBAL_RESET 0x08
#define PLATFORM_CAP_PCIE_PME_D3COLD 0x10
#define FW_BUILD_ID_MASK "QC_IMAGE_VERSION_STRING="
bool ath11k_cold_boot_cal = true;
EXPORT_SYMBOL(ath11k_cold_boot_cal);
module_param_named(cold_boot_cal, ath11k_cold_boot_cal, bool, 0644);
MODULE_PARM_DESC(cold_boot_cal,
"Decrease the channel switch time but increase the driver load time (Default: true)");
static const struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
num_clients_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
num_clients),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
wake_msi_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
wake_msi),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
gpios_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
gpios_len),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = QMI_WLFW_MAX_NUM_GPIO_V01,
.elem_size = sizeof(u32),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
gpios),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
nm_modem_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
nm_modem),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
bdf_support_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
bdf_support),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
bdf_cache_support_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
bdf_cache_support),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x16,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
m3_support_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x16,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
m3_support),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x17,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
m3_cache_support_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x17,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
m3_cache_support),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x18,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
cal_filesys_support_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x18,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
cal_filesys_support),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x19,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
cal_cache_support_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x19,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
cal_cache_support),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1A,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
cal_done_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1A,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
cal_done),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1B,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
mem_bucket_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x1B,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
mem_bucket),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1C,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
mem_cfg_mode_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1C,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
mem_cfg_mode),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_wlanfw_host_cap_resp_msg_v01, resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
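/*
 * A minimal send/wait sketch, assuming the usual soc/qcom QMI helpers;
 * the message ID, max-length, and timeout macros are the ones ath11k's
 * qmi.h is expected to define, and error handling is abbreviated
 * (#if 0, illustrative only).
 */
#if 0
static int example_send_host_cap(struct ath11k_base *ab)
{
	struct qmi_wlanfw_host_cap_req_msg_v01 req = {};
	struct qmi_wlanfw_host_cap_resp_msg_v01 resp = {};
	struct qmi_txn txn;
	int ret;

	req.num_clients_valid = 1;
	req.num_clients = 1;

	/* the _ei tables above tell the QMI core how to encode/decode TLVs */
	ret = qmi_txn_init(&ab->qmi.handle, &txn,
			   qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
	if (ret < 0)
		return ret;

	ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
			       QMI_WLANFW_HOST_CAP_REQ_V01,
			       QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
			       qmi_wlanfw_host_cap_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		return ret;
	}

	ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
	if (ret < 0)
		return ret;

	return resp.resp.result == QMI_RESULT_SUCCESS_V01 ? 0 : -EINVAL;
}
#endif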
static const struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
fw_ready_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
fw_ready_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
initiate_cal_download_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
initiate_cal_download_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
initiate_cal_update_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
initiate_cal_update_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
msa_ready_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
msa_ready_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
pin_connect_result_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
pin_connect_result_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
client_id_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
client_id),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x16,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
request_mem_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x16,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
request_mem_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x17,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
fw_mem_ready_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x17,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
fw_mem_ready_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x18,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
fw_init_done_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x18,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
fw_init_done_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x19,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
rejuvenate_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x19,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
rejuvenate_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1A,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
xo_cal_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1A,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
xo_cal_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1B,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
cal_done_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1B,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
cal_done_enable),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
fw_status_valid),
},
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
fw_status),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, offset),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, size),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, secure_flag),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01,
size),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, type),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = QMI_WLANFW_MAX_NUM_MEM_CFG_V01,
.elem_size = sizeof(struct qmi_wlanfw_mem_cfg_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg),
.ei_array = qmi_wlanfw_mem_cfg_s_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
mem_seg_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
.elem_size = sizeof(struct qmi_wlanfw_mem_seg_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
mem_seg),
.ei_array = qmi_wlanfw_mem_seg_s_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, addr),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, size),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, type),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, restore),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
mem_seg_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
.elem_size = sizeof(struct qmi_wlanfw_mem_seg_resp_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
mem_seg),
.ei_array = qmi_wlanfw_mem_seg_resp_s_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_wlanfw_respond_mem_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_device_info_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlfw_device_info_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
bar_addr_valid),
},
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
bar_addr),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
bar_size_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
bar_size),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
chip_id),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
chip_family),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_rf_board_info_s_v01,
board_id),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_soc_info_s_v01, soc_id),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
fw_version),
},
{
.data_type = QMI_STRING,
.elem_len = ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1,
.elem_size = sizeof(char),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
fw_build_timestamp),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
chip_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_wlanfw_rf_chip_info_s_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
chip_info),
.ei_array = qmi_wlanfw_rf_chip_info_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
board_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_wlanfw_rf_board_info_s_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
board_info),
.ei_array = qmi_wlanfw_rf_board_info_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
soc_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_wlanfw_soc_info_s_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
soc_info),
.ei_array = qmi_wlanfw_soc_info_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
fw_version_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_wlanfw_fw_version_info_s_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
fw_version_info),
.ei_array = qmi_wlanfw_fw_version_info_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
fw_build_id_valid),
},
{
.data_type = QMI_STRING,
.elem_len = ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1,
.elem_size = sizeof(char),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
fw_build_id),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
num_macs_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
num_macs),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x16,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
voltage_mv_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x16,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
voltage_mv),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x17,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
time_freq_hz_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x17,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
time_freq_hz),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x18,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
otp_version_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x18,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
otp_version),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x19,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
eeprom_read_timeout_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x19,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
eeprom_read_timeout),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
valid),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
file_id_valid),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum qmi_wlanfw_cal_temp_id_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
file_id),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
total_size_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
total_size),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
seg_id_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
seg_id),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
data_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
data_len),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = QMI_WLANFW_MAX_DATA_SIZE_V01,
.elem_size = sizeof(u8),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
data),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
end_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
end),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
bdf_type_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
bdf_type),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_wlanfw_bdf_download_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, addr),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, size),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_wlanfw_m3_info_resp_msg_v01, resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
pipe_num),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
pipe_dir),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
nentries),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
nbytes_max),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
flags),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
service_id),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
pipe_dir),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
pipe_num),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, id),
},
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01,
offset),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01,
addr),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
mode),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
hw_debug_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
hw_debug),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_wlanfw_wlan_mode_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
host_version_valid),
},
{
.data_type = QMI_STRING,
.elem_len = QMI_WLANFW_MAX_STR_LEN_V01 + 1,
.elem_size = sizeof(char),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
host_version),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
tgt_cfg_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
tgt_cfg_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = QMI_WLANFW_MAX_NUM_CE_V01,
.elem_size = sizeof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
tgt_cfg),
.ei_array = qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
svc_cfg_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
svc_cfg_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = QMI_WLANFW_MAX_NUM_SVC_V01,
.elem_size = sizeof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
svc_cfg),
.ei_array = qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
shadow_reg_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
shadow_reg_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V01,
.elem_size = sizeof(struct qmi_wlanfw_shadow_reg_cfg_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
shadow_reg),
.ei_array = qmi_wlanfw_shadow_reg_cfg_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
shadow_reg_v2_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
shadow_reg_v2_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01,
.elem_size = sizeof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
shadow_reg_v2),
.ei_array = qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_resp_msg_v01, resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
enablefwlog_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
enablefwlog),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_wlanfw_wlan_ini_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static const struct qmi_elem_info qmi_wlfw_fw_init_done_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
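/* Send the host capability request to firmware: number of clients,
 * target memory mode, BDF and M3 support, calibration state and the
 * sleep clock selection. This runs on QMI server arrival, right after
 * the indication registration.
 */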
static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
{
struct qmi_wlanfw_host_cap_req_msg_v01 req;
struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
struct qmi_txn txn;
int ret = 0;
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
req.num_clients_valid = 1;
req.num_clients = 1;
req.mem_cfg_mode = ab->qmi.target_mem_mode;
req.mem_cfg_mode_valid = 1;
req.bdf_support_valid = 1;
req.bdf_support = 1;
if (ab->hw_params.m3_fw_support) {
req.m3_support_valid = 1;
req.m3_support = 1;
req.m3_cache_support_valid = 1;
req.m3_cache_support = 1;
} else {
req.m3_support_valid = 0;
req.m3_support = 0;
req.m3_cache_support_valid = 0;
req.m3_cache_support = 0;
}
req.cal_done_valid = 1;
req.cal_done = ab->qmi.cal_done;
if (ab->hw_params.internal_sleep_clock) {
req.nm_modem_valid = 1;
/* Notify firmware that this is non-qualcomm platform. */
req.nm_modem |= HOST_CSTATE_BIT;
/* Notify firmware about the sleep clock selection,
* nm_modem_bit[1] is used for this purpose. Host driver on
* non-qualcomm platforms should select internal sleep
* clock.
*/
req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT;
}
if (ab->hw_params.global_reset)
req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;
req.nm_modem |= PLATFORM_CAP_PCIE_PME_D3COLD;
ath11k_dbg(ab, ATH11K_DBG_QMI, "host cap request\n");
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
if (ret < 0)
goto out;
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_HOST_CAP_REQ_V01,
QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_host_cap_req_msg_v01_ei, &req);
if (ret < 0) {
qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to send host capability request: %d\n", ret);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0)
goto out;
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
ath11k_warn(ab, "host capability request failed: %d %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out;
}
out:
return ret;
}
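/* Register with firmware for the indications the host wants to receive:
 * fw ready, cal done, fw init done and, unless the target uses a fixed
 * firmware memory region, the request-mem/mem-ready pair. The request
 * and response buffers are heap-allocated.
 */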
static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab)
{
struct qmi_wlanfw_ind_register_req_msg_v01 *req;
struct qmi_wlanfw_ind_register_resp_msg_v01 *resp;
struct qmi_handle *handle = &ab->qmi.handle;
struct qmi_txn txn;
int ret;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
resp = kzalloc(sizeof(*resp), GFP_KERNEL);
if (!resp) {
ret = -ENOMEM;
goto resp_out;
}
req->client_id_valid = 1;
req->client_id = QMI_WLANFW_CLIENT_ID;
req->fw_ready_enable_valid = 1;
req->fw_ready_enable = 1;
req->cal_done_enable_valid = 1;
req->cal_done_enable = 1;
req->fw_init_done_enable_valid = 1;
req->fw_init_done_enable = 1;
req->pin_connect_result_enable_valid = 0;
req->pin_connect_result_enable = 0;
/* WCN6750 doesn't request DDR memory via QMI; instead it uses a fixed
 * 12 MB reserved memory region in DDR.
 */
if (!ab->hw_params.fixed_fw_mem) {
req->request_mem_enable_valid = 1;
req->request_mem_enable = 1;
req->fw_mem_ready_enable_valid = 1;
req->fw_mem_ready_enable = 1;
}
ret = qmi_txn_init(handle, &txn,
qmi_wlanfw_ind_register_resp_msg_v01_ei, resp);
if (ret < 0)
goto out;
ath11k_dbg(ab, ATH11K_DBG_QMI, "indication register request\n");
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_IND_REGISTER_REQ_V01,
QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_ind_register_req_msg_v01_ei, req);
if (ret < 0) {
qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to send indication register request: %d\n",
ret);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0) {
ath11k_warn(ab, "failed to register fw indication: %d\n", ret);
goto out;
}
if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
ath11k_warn(ab, "firmware indication register request failed: %d %d\n",
resp->resp.result, resp->resp.error);
ret = -EINVAL;
goto out;
}
out:
kfree(resp);
resp_out:
kfree(req);
return ret;
}
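/* Answer the firmware's request-mem indication with the list of DMA
 * segments the host allocated. When allocation was deferred, an empty
 * response is sent instead and the firmware is expected to re-request
 * smaller chunks, so an error result from firmware is tolerated then.
 */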
static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
{
struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
struct qmi_txn txn;
int ret = 0, i;
bool delayed;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
memset(&resp, 0, sizeof(resp));
/* By default QCA6390 firmware requests a single block of ~4 MB of
 * contiguous DMA memory, which is hard to allocate from the OS. So the
 * host returns failure to the firmware, and the firmware then requests
 * multiple blocks of small chunk size memory instead.
 */
if (!(ab->hw_params.fixed_mem_region ||
test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) &&
ab->qmi.target_mem_delayed) {
delayed = true;
ath11k_dbg(ab, ATH11K_DBG_QMI, "delays mem_request %d\n",
ab->qmi.mem_seg_count);
memset(req, 0, sizeof(*req));
} else {
delayed = false;
req->mem_seg_len = ab->qmi.mem_seg_count;
for (i = 0; i < req->mem_seg_len; i++) {
req->mem_seg[i].addr = ab->qmi.target_mem[i].paddr;
req->mem_seg[i].size = ab->qmi.target_mem[i].size;
req->mem_seg[i].type = ab->qmi.target_mem[i].type;
ath11k_dbg(ab, ATH11K_DBG_QMI,
"req mem_seg[%d] %pad %u %u\n", i,
&ab->qmi.target_mem[i].paddr,
ab->qmi.target_mem[i].size,
ab->qmi.target_mem[i].type);
}
}
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_respond_mem_resp_msg_v01_ei, &resp);
if (ret < 0)
goto out;
ath11k_dbg(ab, ATH11K_DBG_QMI, "respond memory request delayed %i\n",
delayed);
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_RESPOND_MEM_REQ_V01,
QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_respond_mem_req_msg_v01_ei, req);
if (ret < 0) {
qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to respond qmi memory request: %d\n",
ret);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0) {
ath11k_warn(ab, "failed to wait qmi memory request: %d\n", ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
/* the error response is expected when
* target_mem_delayed is true.
*/
if (delayed && resp.resp.error == 0)
goto out;
ath11k_warn(ab, "qmi respond memory request failed: %d %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out;
}
out:
kfree(req);
return ret;
}
static void ath11k_qmi_free_target_mem_chunk(struct ath11k_base *ab)
{
int i;
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
if ((ab->hw_params.fixed_mem_region ||
test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) &&
ab->qmi.target_mem[i].iaddr)
iounmap(ab->qmi.target_mem[i].iaddr);
if (!ab->qmi.target_mem[i].vaddr)
continue;
dma_free_coherent(ab->dev,
ab->qmi.target_mem[i].prev_size,
ab->qmi.target_mem[i].vaddr,
ab->qmi.target_mem[i].paddr);
ab->qmi.target_mem[i].vaddr = NULL;
}
}
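/* Allocate coherent DMA memory for every segment the firmware
 * requested, reusing chunks from a previous boot when type and size
 * match. If the initial large request cannot be satisfied, flag
 * target_mem_delayed so the firmware retries with smaller chunks.
 */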
static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
{
int i;
struct target_mem_chunk *chunk;
ab->qmi.target_mem_delayed = false;
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
chunk = &ab->qmi.target_mem[i];
/* The firmware reloads during cold boot and firmware recovery; in that
 * case there is no need to allocate memory for the firmware again.
 */
if (chunk->vaddr) {
if (chunk->prev_type == chunk->type &&
chunk->prev_size == chunk->size)
continue;
/* cannot reuse the existing chunk */
dma_free_coherent(ab->dev, chunk->prev_size,
chunk->vaddr, chunk->paddr);
chunk->vaddr = NULL;
}
chunk->vaddr = dma_alloc_coherent(ab->dev,
chunk->size,
&chunk->paddr,
GFP_KERNEL | __GFP_NOWARN);
if (!chunk->vaddr) {
if (ab->qmi.mem_seg_count <= ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"dma allocation failed (%d B type %u), will try later with small size\n",
chunk->size,
chunk->type);
ath11k_qmi_free_target_mem_chunk(ab);
ab->qmi.target_mem_delayed = true;
return 0;
}
ath11k_err(ab, "failed to allocate dma memory for qmi (%d B type %u)\n",
chunk->size,
chunk->type);
return -EINVAL;
}
chunk->prev_type = chunk->type;
chunk->prev_size = chunk->size;
}
return 0;
}
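/* For targets with a fixed (reserved) memory region, map the requested
 * segments onto that region instead of allocating DMA memory: host DDR
 * from the "memory-region" DT phandle, BDF at the hardware-specific
 * bdf_addr, and CALDB either after host DDR or at a fixed address.
 */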
static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
{
struct device *dev = ab->dev;
struct device_node *hremote_node = NULL;
struct resource res;
u32 host_ddr_sz;
int i, idx, ret;
for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) {
switch (ab->qmi.target_mem[i].type) {
case HOST_DDR_REGION_TYPE:
hremote_node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (!hremote_node) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"fail to get hremote_node\n");
return -ENODEV;
}
ret = of_address_to_resource(hremote_node, 0, &res);
of_node_put(hremote_node);
if (ret) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"fail to get reg from hremote\n");
return ret;
}
if (resource_size(&res) < ab->qmi.target_mem[i].size) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"fail to assign memory of sz\n");
return -EINVAL;
}
ab->qmi.target_mem[idx].paddr = res.start;
ab->qmi.target_mem[idx].iaddr =
ioremap(ab->qmi.target_mem[idx].paddr,
ab->qmi.target_mem[i].size);
if (!ab->qmi.target_mem[idx].iaddr)
return -EIO;
ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
host_ddr_sz = ab->qmi.target_mem[i].size;
ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
idx++;
break;
case BDF_MEM_REGION_TYPE:
ab->qmi.target_mem[idx].paddr = ab->hw_params.bdf_addr;
ab->qmi.target_mem[idx].vaddr = NULL;
ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
idx++;
break;
case CALDB_MEM_REGION_TYPE:
if (ab->qmi.target_mem[i].size > ATH11K_QMI_CALDB_SIZE) {
ath11k_warn(ab, "qmi mem size is low to load caldata\n");
return -EINVAL;
}
if (ath11k_core_coldboot_cal_support(ab)) {
if (hremote_node) {
ab->qmi.target_mem[idx].paddr =
res.start + host_ddr_sz;
ab->qmi.target_mem[idx].iaddr =
ioremap(ab->qmi.target_mem[idx].paddr,
ab->qmi.target_mem[i].size);
if (!ab->qmi.target_mem[idx].iaddr)
return -EIO;
} else {
ab->qmi.target_mem[idx].paddr =
ATH11K_QMI_CALDB_ADDRESS;
}
} else {
ab->qmi.target_mem[idx].paddr = 0;
ab->qmi.target_mem[idx].vaddr = NULL;
}
ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
idx++;
break;
default:
ath11k_warn(ab, "qmi ignore invalid mem req type %d\n",
ab->qmi.target_mem[i].type);
break;
}
}
ab->qmi.mem_seg_count = idx;
return 0;
}
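/* Ask firmware for the device BAR address and size; only relevant for
 * hybrid bus devices, where the host maps device registers from the
 * address the firmware reports.
 */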
static int ath11k_qmi_request_device_info(struct ath11k_base *ab)
{
struct qmi_wlanfw_device_info_req_msg_v01 req = {};
struct qmi_wlanfw_device_info_resp_msg_v01 resp = {};
struct qmi_txn txn;
void __iomem *bar_addr_va;
int ret;
/* device info message req is only sent for hybrid bus devices */
if (!ab->hw_params.hybrid_bus_type)
return 0;
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlfw_device_info_resp_msg_v01_ei, &resp);
if (ret < 0)
goto out;
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_DEVICE_INFO_REQ_V01,
QMI_WLANFW_DEVICE_INFO_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_device_info_req_msg_v01_ei, &req);
if (ret < 0) {
qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to send qmi target device info request: %d\n",
ret);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0) {
ath11k_warn(ab, "failed to wait qmi target device info request: %d\n",
ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
ath11k_warn(ab, "qmi device info request failed: %d %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out;
}
if (!resp.bar_addr_valid || !resp.bar_size_valid) {
ath11k_warn(ab, "qmi device info response invalid: %d %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out;
}
if (!resp.bar_addr ||
resp.bar_size != ATH11K_QMI_DEVICE_BAR_SIZE) {
ath11k_warn(ab, "qmi device info invalid address and size: %llu %u\n",
resp.bar_addr, resp.bar_size);
ret = -EINVAL;
goto out;
}
bar_addr_va = devm_ioremap(ab->dev, resp.bar_addr, resp.bar_size);
if (!bar_addr_va) {
ath11k_warn(ab, "qmi device info ioremap failed\n");
ab->mem_len = 0;
ret = -EIO;
goto out;
}
ab->mem = bar_addr_va;
ab->mem_len = resp.bar_size;
return 0;
out:
return ret;
}
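/* Fetch target capabilities: chip, board and SoC identifiers, firmware
 * version and build id, and whether calibration data is read from
 * EEPROM. Also checks SMBIOS and DT for a board file variant name.
 */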
static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
{
struct qmi_wlanfw_cap_req_msg_v01 req;
struct qmi_wlanfw_cap_resp_msg_v01 resp;
struct qmi_txn txn;
int ret = 0;
int r;
char *fw_build_id;
int fw_build_id_mask_len;
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_cap_resp_msg_v01_ei,
&resp);
if (ret < 0)
goto out;
ath11k_dbg(ab, ATH11K_DBG_QMI, "target cap request\n");
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_CAP_REQ_V01,
QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_cap_req_msg_v01_ei, &req);
if (ret < 0) {
qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to send qmi cap request: %d\n",
ret);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0) {
ath11k_warn(ab, "failed to wait qmi cap request: %d\n", ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
ath11k_warn(ab, "qmi cap request failed: %d %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out;
}
if (resp.chip_info_valid) {
ab->qmi.target.chip_id = resp.chip_info.chip_id;
ab->qmi.target.chip_family = resp.chip_info.chip_family;
}
if (resp.board_info_valid)
ab->qmi.target.board_id = resp.board_info.board_id;
else
ab->qmi.target.board_id = 0xFF;
if (resp.soc_info_valid)
ab->qmi.target.soc_id = resp.soc_info.soc_id;
if (resp.fw_version_info_valid) {
ab->qmi.target.fw_version = resp.fw_version_info.fw_version;
strscpy(ab->qmi.target.fw_build_timestamp,
resp.fw_version_info.fw_build_timestamp,
sizeof(ab->qmi.target.fw_build_timestamp));
}
if (resp.fw_build_id_valid)
strscpy(ab->qmi.target.fw_build_id, resp.fw_build_id,
sizeof(ab->qmi.target.fw_build_id));
if (resp.eeprom_read_timeout_valid) {
ab->qmi.target.eeprom_caldata =
resp.eeprom_read_timeout;
ath11k_dbg(ab, ATH11K_DBG_QMI, "cal data supported from eeprom\n");
}
fw_build_id = ab->qmi.target.fw_build_id;
fw_build_id_mask_len = strlen(FW_BUILD_ID_MASK);
if (!strncmp(fw_build_id, FW_BUILD_ID_MASK, fw_build_id_mask_len))
fw_build_id = fw_build_id + fw_build_id_mask_len;
ath11k_info(ab, "chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
ab->qmi.target.chip_id, ab->qmi.target.chip_family,
ab->qmi.target.board_id, ab->qmi.target.soc_id);
ath11k_info(ab, "fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
ab->qmi.target.fw_version,
ab->qmi.target.fw_build_timestamp,
fw_build_id);
r = ath11k_core_check_smbios(ab);
if (r)
ath11k_dbg(ab, ATH11K_DBG_QMI, "SMBIOS bdf variant name not set.\n");
r = ath11k_core_check_dt(ab);
if (r)
ath11k_dbg(ab, ATH11K_DBG_QMI, "DT bdf variant name not set.\n");
out:
return ret;
}
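/* Stream a file (board data, regdb or caldata) to firmware in
 * QMI_WLANFW_MAX_DATA_SIZE_V01 sized segments via BDF download
 * requests. Targets with a fixed BDF address get the file copied
 * straight into device memory with memcpy_toio() instead.
 */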
static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
const u8 *data, u32 len, u8 type)
{
struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
struct qmi_txn txn;
const u8 *temp = data;
void __iomem *bdf_addr = NULL;
int ret = 0;
u32 remaining = len;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
memset(&resp, 0, sizeof(resp));
if (ab->hw_params.fixed_bdf_addr) {
bdf_addr = ioremap(ab->hw_params.bdf_addr, ab->hw_params.fw.board_size);
if (!bdf_addr) {
ath11k_warn(ab, "qmi ioremap error for bdf_addr\n");
ret = -EIO;
goto err_free_req;
}
}
while (remaining) {
req->valid = 1;
req->file_id_valid = 1;
req->file_id = ab->qmi.target.board_id;
req->total_size_valid = 1;
req->total_size = remaining;
req->seg_id_valid = 1;
req->data_valid = 1;
req->bdf_type = type;
req->bdf_type_valid = 1;
req->end_valid = 1;
req->end = 0;
if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) {
req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01;
} else {
req->data_len = remaining;
req->end = 1;
}
if (ab->hw_params.fixed_bdf_addr ||
type == ATH11K_QMI_FILE_TYPE_EEPROM) {
req->data_valid = 0;
req->end = 1;
req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
} else {
memcpy(req->data, temp, req->data_len);
}
if (ab->hw_params.fixed_bdf_addr) {
void __iomem *dst = bdf_addr;

/* Do not advance bdf_addr itself: it must stay at the mapping
 * base so the final iounmap() gets the original cookie.
 */
if (type == ATH11K_QMI_FILE_TYPE_CALDATA)
dst += ab->hw_params.fw.cal_offset;
memcpy_toio(dst, temp, len);
}
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_bdf_download_resp_msg_v01_ei,
&resp);
if (ret < 0)
goto err_iounmap;
ath11k_dbg(ab, ATH11K_DBG_QMI, "bdf download req fixed addr type %d\n",
type);
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_BDF_DOWNLOAD_REQ_V01,
QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_bdf_download_req_msg_v01_ei, req);
if (ret < 0) {
qmi_txn_cancel(&txn);
goto err_iounmap;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0) {
ath11k_warn(ab, "failed to wait board file download request: %d\n",
ret);
goto err_iounmap;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
ath11k_warn(ab, "board file download request failed: %d %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto err_iounmap;
}
if (ab->hw_params.fixed_bdf_addr ||
type == ATH11K_QMI_FILE_TYPE_EEPROM) {
remaining = 0;
} else {
remaining -= req->data_len;
temp += req->data_len;
req->seg_id++;
ath11k_dbg(ab, ATH11K_DBG_QMI, "bdf download request remaining %i\n",
remaining);
}
}
err_iounmap:
if (ab->hw_params.fixed_bdf_addr)
iounmap(bdf_addr);
err_free_req:
kfree(req);
return ret;
}
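/* Fetch and download the board file (or the regdb when @regdb is true),
 * detecting ELF versus binary format from the magic bytes, then
 * download calibration data from EEPROM or a cal-<bus>-<id>.bin file
 * for targets that use it.
 */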
static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab,
bool regdb)
{
struct device *dev = ab->dev;
char filename[ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE];
const struct firmware *fw_entry;
struct ath11k_board_data bd;
u32 fw_size, file_type;
int ret = 0, bdf_type;
const u8 *tmp;
memset(&bd, 0, sizeof(bd));
if (regdb) {
ret = ath11k_core_fetch_regdb(ab, &bd);
} else {
ret = ath11k_core_fetch_bdf(ab, &bd);
if (ret)
ath11k_warn(ab, "qmi failed to fetch board file: %d\n", ret);
}
if (ret)
goto out;
if (regdb)
bdf_type = ATH11K_QMI_BDF_TYPE_REGDB;
else if (bd.len >= SELFMAG && memcmp(bd.data, ELFMAG, SELFMAG) == 0)
bdf_type = ATH11K_QMI_BDF_TYPE_ELF;
else
bdf_type = ATH11K_QMI_BDF_TYPE_BIN;
ath11k_dbg(ab, ATH11K_DBG_QMI, "bdf_type %d\n", bdf_type);
fw_size = min_t(u32, ab->hw_params.fw.board_size, bd.len);
ret = ath11k_qmi_load_file_target_mem(ab, bd.data, fw_size, bdf_type);
if (ret < 0) {
ath11k_warn(ab, "qmi failed to load bdf file\n");
goto out;
}
/* QCA6390/WCN6855 do not support cal data, skip it */
if (bdf_type == ATH11K_QMI_BDF_TYPE_ELF || bdf_type == ATH11K_QMI_BDF_TYPE_REGDB)
goto out;
if (ab->qmi.target.eeprom_caldata) {
file_type = ATH11K_QMI_FILE_TYPE_EEPROM;
tmp = filename;
fw_size = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
} else {
file_type = ATH11K_QMI_FILE_TYPE_CALDATA;
/* cal-<bus>-<id>.bin */
snprintf(filename, sizeof(filename), "cal-%s-%s.bin",
ath11k_bus_str(ab->hif.bus), dev_name(dev));
fw_entry = ath11k_core_firmware_request(ab, filename);
if (!IS_ERR(fw_entry))
goto success;
fw_entry = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_CAL_FILE);
if (IS_ERR(fw_entry)) {
/* Caldata may not be present during first-time calibration in the
 * factory, hence allow booting without caldata in FTM mode.
 */
if (ath11k_ftm_mode) {
ath11k_info(ab,
"Booting without cal data file in factory test mode\n");
return 0;
}
ret = PTR_ERR(fw_entry);
ath11k_warn(ab,
"qmi failed to load CAL data file:%s\n",
filename);
goto out;
}
success:
fw_size = min_t(u32, ab->hw_params.fw.board_size, fw_entry->size);
tmp = fw_entry->data;
}
ret = ath11k_qmi_load_file_target_mem(ab, tmp, fw_size, file_type);
if (ret < 0) {
ath11k_warn(ab, "qmi failed to load caldata\n");
goto out_qmi_cal;
}
ath11k_dbg(ab, ATH11K_DBG_QMI, "caldata type: %u\n", file_type);
out_qmi_cal:
if (!ab->qmi.target.eeprom_caldata)
release_firmware(fw_entry);
out:
ath11k_core_free_bdf(ab, &bd);
ath11k_dbg(ab, ATH11K_DBG_QMI, "BDF download sequence completed\n");
return ret;
}
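/* Load the M3 coprocessor firmware image into coherent DMA memory. The
 * buffer is kept across firmware restarts and only reallocated when it
 * is absent or too small for the new image.
 */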
static int ath11k_qmi_m3_load(struct ath11k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
const struct firmware *fw;
char path[100];
int ret;
fw = ath11k_core_firmware_request(ab, ATH11K_M3_FILE);
if (IS_ERR(fw)) {
ret = PTR_ERR(fw);
ath11k_core_create_firmware_path(ab, ATH11K_M3_FILE,
path, sizeof(path));
ath11k_err(ab, "failed to load %s: %d\n", path, ret);
return ret;
}
if (m3_mem->vaddr) {
if (m3_mem->size >= fw->size)
goto skip_m3_alloc;

/* The existing buffer is too small for the new image; free it
 * and fall through to reallocate.
 */
dma_free_coherent(ab->dev, m3_mem->size,
m3_mem->vaddr, m3_mem->paddr);
m3_mem->vaddr = NULL;
m3_mem->size = 0;
}
m3_mem->vaddr = dma_alloc_coherent(ab->dev,
fw->size, &m3_mem->paddr,
GFP_KERNEL);
if (!m3_mem->vaddr) {
ath11k_err(ab, "failed to allocate memory for M3 with size %zu\n",
fw->size);
release_firmware(fw);
return -ENOMEM;
}
skip_m3_alloc:
memcpy(m3_mem->vaddr, fw->data, fw->size);
m3_mem->size = fw->size;
release_firmware(fw);
return 0;
}
static void ath11k_qmi_m3_free(struct ath11k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
if (!ab->hw_params.m3_fw_support || !m3_mem->vaddr)
return;
dma_free_coherent(ab->dev, m3_mem->size,
m3_mem->vaddr, m3_mem->paddr);
m3_mem->vaddr = NULL;
m3_mem->size = 0;
}
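/* Tell firmware where the M3 image lives: load it when the hardware
 * needs it and send its DMA address and size; targets without M3
 * support send a zeroed address and size.
 */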
static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
struct qmi_wlanfw_m3_info_req_msg_v01 req;
struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
struct qmi_txn txn;
int ret = 0;
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
if (ab->hw_params.m3_fw_support) {
ret = ath11k_qmi_m3_load(ab);
if (ret) {
ath11k_err(ab, "failed to load m3 firmware: %d", ret);
return ret;
}
req.addr = m3_mem->paddr;
req.size = m3_mem->size;
} else {
req.addr = 0;
req.size = 0;
}
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_m3_info_resp_msg_v01_ei, &resp);
if (ret < 0)
goto out;
ath11k_dbg(ab, ATH11K_DBG_QMI, "m3 info req\n");
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_M3_INFO_REQ_V01,
QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
qmi_wlanfw_m3_info_req_msg_v01_ei, &req);
if (ret < 0) {
qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to send m3 information request: %d\n",
ret);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0) {
ath11k_warn(ab, "failed to wait m3 information request: %d\n", ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
ath11k_warn(ab, "m3 info request failed: %d %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out;
}
out:
return ret;
}
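/* Switch the firmware operating mode. An -ENETRESET while switching
 * the firmware off just means the WLFW service has already gone away
 * and is treated as success.
 */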
static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab,
u32 mode)
{
struct qmi_wlanfw_wlan_mode_req_msg_v01 req;
struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp;
struct qmi_txn txn;
int ret = 0;
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
req.mode = mode;
req.hw_debug_valid = 1;
req.hw_debug = 0;
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_wlan_mode_resp_msg_v01_ei, &resp);
if (ret < 0)
goto out;
ath11k_dbg(ab, ATH11K_DBG_QMI, "wlan mode req mode %d\n", mode);
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_WLAN_MODE_REQ_V01,
QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req);
if (ret < 0) {
qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to send wlan mode request (mode %d): %d\n",
mode, ret);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0) {
if (mode == ATH11K_FIRMWARE_MODE_OFF && ret == -ENETRESET) {
ath11k_warn(ab, "WLFW service is dis-connected\n");
return 0;
}
ath11k_warn(ab, "failed to wait wlan mode request (mode %d): %d\n",
mode, ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
ath11k_warn(ab, "wlan mode request failed (mode: %d): %d %d\n",
mode, resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out;
}
out:
return ret;
}
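/* Send the host's copy engine and service-to-pipe configuration to
 * firmware, plus the shadow register v2 list on targets that support
 * shadow registers.
 */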
static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab)
{
struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req;
struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp;
struct ce_pipe_config *ce_cfg;
struct service_to_pipe *svc_cfg;
struct qmi_txn txn;
int ret = 0, pipe_num;
ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce;
svc_cfg = (struct service_to_pipe *)ab->qmi.ce_cfg.svc_to_ce_map;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
memset(&resp, 0, sizeof(resp));
req->host_version_valid = 1;
strscpy(req->host_version, ATH11K_HOST_VERSION_STRING,
sizeof(req->host_version));
req->tgt_cfg_valid = 1;
/* This is number of CE configs */
req->tgt_cfg_len = ab->qmi.ce_cfg.tgt_ce_len;
for (pipe_num = 0; pipe_num < req->tgt_cfg_len; pipe_num++) {
req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum;
req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir;
req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries;
req->tgt_cfg[pipe_num].nbytes_max = ce_cfg[pipe_num].nbytes_max;
req->tgt_cfg[pipe_num].flags = ce_cfg[pipe_num].flags;
}
req->svc_cfg_valid = 1;
/* This is number of Service/CE configs */
req->svc_cfg_len = ab->qmi.ce_cfg.svc_to_ce_map_len;
for (pipe_num = 0; pipe_num < req->svc_cfg_len; pipe_num++) {
req->svc_cfg[pipe_num].service_id = svc_cfg[pipe_num].service_id;
req->svc_cfg[pipe_num].pipe_dir = svc_cfg[pipe_num].pipedir;
req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum;
}
req->shadow_reg_valid = 0;
/* set shadow v2 configuration */
if (ab->hw_params.supports_shadow_regs) {
req->shadow_reg_v2_valid = 1;
req->shadow_reg_v2_len = min_t(u32,
ab->qmi.ce_cfg.shadow_reg_v2_len,
QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01);
memcpy(&req->shadow_reg_v2, ab->qmi.ce_cfg.shadow_reg_v2,
sizeof(u32) * req->shadow_reg_v2_len);
} else {
req->shadow_reg_v2_valid = 0;
}
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_wlan_cfg_resp_msg_v01_ei, &resp);
if (ret < 0)
goto out;
ath11k_dbg(ab, ATH11K_DBG_QMI, "wlan cfg req\n");
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_WLAN_CFG_REQ_V01,
QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req);
if (ret < 0) {
qmi_txn_cancel(&txn);
ath11k_warn(ab, "failed to send wlan config request: %d\n",
ret);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0) {
ath11k_warn(ab, "failed to wait wlan config request: %d\n", ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
ath11k_warn(ab, "wlan config request failed: %d %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out;
}
out:
kfree(req);
return ret;
}
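/* Enable or disable firmware logging via the wlan ini request; used on
 * targets that deliver WMI diag events from firmware.
 */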
static int ath11k_qmi_wlanfw_wlan_ini_send(struct ath11k_base *ab, bool enable)
{
int ret;
struct qmi_txn txn;
struct qmi_wlanfw_wlan_ini_req_msg_v01 req = {};
struct qmi_wlanfw_wlan_ini_resp_msg_v01 resp = {};
req.enablefwlog_valid = true;
req.enablefwlog = enable ? 1 : 0;
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_wlan_ini_resp_msg_v01_ei, &resp);
if (ret < 0)
goto out;
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_WLAN_INI_REQ_V01,
QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_wlan_ini_req_msg_v01_ei, &req);
if (ret < 0) {
ath11k_warn(ab, "qmi failed to send wlan ini request, err = %d\n",
ret);
qmi_txn_cancel(&txn);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0) {
ath11k_warn(ab, "qmi failed wlan ini request, err = %d\n", ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
ath11k_warn(ab, "qmi wlan ini request failed, result: %d, err: %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
}
out:
return ret;
}
void ath11k_qmi_firmware_stop(struct ath11k_base *ab)
{
int ret;
ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware stop\n");
ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_OFF);
if (ret < 0) {
ath11k_warn(ab, "qmi failed to send wlan mode off: %d\n", ret);
return;
}
}
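/* Bring the wlan firmware up: optionally enable fwlog, push the CE and
 * service configuration, then switch the firmware into the requested
 * mode.
 */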
int ath11k_qmi_firmware_start(struct ath11k_base *ab,
u32 mode)
{
int ret;
ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware start\n");
if (ab->hw_params.fw_wmi_diag_event) {
ret = ath11k_qmi_wlanfw_wlan_ini_send(ab, true);
if (ret < 0) {
ath11k_warn(ab, "qmi failed to send wlan fw ini:%d\n", ret);
return ret;
}
}
ret = ath11k_qmi_wlanfw_wlan_cfg_send(ab);
if (ret < 0) {
ath11k_warn(ab, "qmi failed to send wlan cfg: %d\n", ret);
return ret;
}
ret = ath11k_qmi_wlanfw_mode_send(ab, mode);
if (ret < 0) {
ath11k_warn(ab, "qmi failed to send wlan fw mode: %d\n", ret);
return ret;
}
return 0;
}
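/* After cold boot calibration completes, power-cycle the target so it
 * restarts into mission mode; only needed on targets that require a
 * firmware reset after calibration.
 */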
int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab)
{
int timeout;
if (!ath11k_core_coldboot_cal_support(ab) ||
ab->hw_params.cbcal_restart_fw == 0)
return 0;
ath11k_dbg(ab, ATH11K_DBG_QMI, "wait for cold boot done\n");
timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
(ab->qmi.cal_done == 1),
ATH11K_COLD_BOOT_FW_RESET_DELAY);
if (timeout <= 0) {
ath11k_warn(ab, "Coldboot Calibration timed out\n");
return -ETIMEDOUT;
}
/* reset the firmware */
ath11k_hif_power_down(ab);
ath11k_hif_power_up(ab);
ath11k_dbg(ab, ATH11K_DBG_QMI, "exit wait for cold boot done\n");
return 0;
}
EXPORT_SYMBOL(ath11k_qmi_fwreset_from_cold_boot);
static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab)
{
int timeout;
int ret;
ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_COLD_BOOT);
if (ret < 0) {
ath11k_warn(ab, "qmi failed to send wlan fw mode: %d\n", ret);
return ret;
}
ath11k_dbg(ab, ATH11K_DBG_QMI, "Coldboot calibration wait started\n");
timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
(ab->qmi.cal_done == 1),
ATH11K_COLD_BOOT_FW_RESET_DELAY);
if (timeout <= 0) {
ath11k_warn(ab, "coldboot calibration timed out\n");
return 0;
}
ath11k_dbg(ab, ATH11K_DBG_QMI, "Coldboot calibration done\n");
return 0;
}
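/* Queue a QMI event for deferred handling in process context. Called
 * from QMI message callbacks, hence the GFP_ATOMIC allocation.
 */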
static int
ath11k_qmi_driver_event_post(struct ath11k_qmi *qmi,
enum ath11k_qmi_event_type type,
void *data)
{
struct ath11k_qmi_driver_event *event;
event = kzalloc(sizeof(*event), GFP_ATOMIC);
if (!event)
return -ENOMEM;
event->type = type;
event->data = data;
spin_lock(&qmi->event_lock);
list_add_tail(&event->list, &qmi->event_list);
spin_unlock(&qmi->event_lock);
queue_work(qmi->event_wq, &qmi->event_work);
return 0;
}
static int ath11k_qmi_event_mem_request(struct ath11k_qmi *qmi)
{
struct ath11k_base *ab = qmi->ab;
int ret;
ret = ath11k_qmi_respond_fw_mem_request(ab);
if (ret < 0) {
ath11k_warn(ab, "qmi failed to respond fw mem req: %d\n", ret);
return ret;
}
return ret;
}
static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi)
{
struct ath11k_base *ab = qmi->ab;
int ret;
ret = ath11k_qmi_request_target_cap(ab);
if (ret < 0) {
ath11k_warn(ab, "failed to request qmi target capabilities: %d\n",
ret);
return ret;
}
ret = ath11k_qmi_request_device_info(ab);
if (ret < 0) {
ath11k_warn(ab, "failed to request qmi device info: %d\n", ret);
return ret;
}
if (ab->hw_params.supports_regdb)
ath11k_qmi_load_bdf_qmi(ab, true);
ret = ath11k_qmi_load_bdf_qmi(ab, false);
if (ret < 0) {
ath11k_warn(ab, "failed to load board data file: %d\n", ret);
return ret;
}
return 0;
}
static int ath11k_qmi_event_server_arrive(struct ath11k_qmi *qmi)
{
struct ath11k_base *ab = qmi->ab;
int ret;
ret = ath11k_qmi_fw_ind_register_send(ab);
if (ret < 0) {
ath11k_warn(ab, "failed to send qmi firmware indication: %d\n",
ret);
return ret;
}
ret = ath11k_qmi_host_cap_send(ab);
if (ret < 0) {
ath11k_warn(ab, "failed to send qmi host cap: %d\n", ret);
return ret;
}
if (!ab->hw_params.fixed_fw_mem)
return ret;
ret = ath11k_qmi_event_load_bdf(qmi);
if (ret < 0) {
ath11k_warn(ab, "qmi failed to download BDF:%d\n", ret);
return ret;
}
return ret;
}
static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
struct sockaddr_qrtr *sq,
struct qmi_txn *txn,
const void *data)
{
struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
struct ath11k_base *ab = qmi->ab;
const struct qmi_wlanfw_request_mem_ind_msg_v01 *msg = data;
int i, ret;
ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware request memory request\n");
if (msg->mem_seg_len == 0 ||
msg->mem_seg_len > ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01) {
ath11k_warn(ab, "invalid memory segment length: %u\n",
msg->mem_seg_len);
return;
}
ab->qmi.mem_seg_count = msg->mem_seg_len;
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
ab->qmi.target_mem[i].type = msg->mem_seg[i].type;
ab->qmi.target_mem[i].size = msg->mem_seg[i].size;
ath11k_dbg(ab, ATH11K_DBG_QMI, "mem seg type %d size %d\n",
msg->mem_seg[i].type, msg->mem_seg[i].size);
}
if (ab->hw_params.fixed_mem_region ||
test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
ret = ath11k_qmi_assign_target_mem_chunk(ab);
if (ret) {
ath11k_warn(ab, "failed to assign qmi target memory: %d\n",
ret);
return;
}
} else {
ret = ath11k_qmi_alloc_target_mem_chunk(ab);
if (ret) {
ath11k_warn(ab, "failed to allocate qmi target memory: %d\n",
ret);
return;
}
}
ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_REQUEST_MEM, NULL);
}
static void ath11k_qmi_msg_mem_ready_cb(struct qmi_handle *qmi_hdl,
struct sockaddr_qrtr *sq,
struct qmi_txn *txn,
const void *decoded)
{
struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
struct ath11k_base *ab = qmi->ab;
ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware memory ready indication\n");
ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_MEM_READY, NULL);
}
static void ath11k_qmi_msg_fw_ready_cb(struct qmi_handle *qmi_hdl,
struct sockaddr_qrtr *sq,
struct qmi_txn *txn,
const void *decoded)
{
struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
struct ath11k_base *ab = qmi->ab;
ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware ready\n");
if (!ab->qmi.cal_done) {
ab->qmi.cal_done = 1;
wake_up(&ab->qmi.cold_boot_waitq);
}
ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_READY, NULL);
}
static void ath11k_qmi_msg_cold_boot_cal_done_cb(struct qmi_handle *qmi_hdl,
struct sockaddr_qrtr *sq,
struct qmi_txn *txn,
const void *decoded)
{
struct ath11k_qmi *qmi = container_of(qmi_hdl,
struct ath11k_qmi, handle);
struct ath11k_base *ab = qmi->ab;
ab->qmi.cal_done = 1;
wake_up(&ab->qmi.cold_boot_waitq);
ath11k_dbg(ab, ATH11K_DBG_QMI, "cold boot calibration done\n");
}
static void ath11k_qmi_msg_fw_init_done_cb(struct qmi_handle *qmi_hdl,
struct sockaddr_qrtr *sq,
struct qmi_txn *txn,
const void *decoded)
{
struct ath11k_qmi *qmi = container_of(qmi_hdl,
struct ath11k_qmi, handle);
struct ath11k_base *ab = qmi->ab;
ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_INIT_DONE, NULL);
ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware init done\n");
}
static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
{
.type = QMI_INDICATION,
.msg_id = QMI_WLFW_REQUEST_MEM_IND_V01,
.ei = qmi_wlanfw_request_mem_ind_msg_v01_ei,
.decoded_size = sizeof(struct qmi_wlanfw_request_mem_ind_msg_v01),
.fn = ath11k_qmi_msg_mem_request_cb,
},
{
.type = QMI_INDICATION,
.msg_id = QMI_WLFW_FW_MEM_READY_IND_V01,
.ei = qmi_wlanfw_mem_ready_ind_msg_v01_ei,
.decoded_size = sizeof(struct qmi_wlanfw_fw_mem_ready_ind_msg_v01),
.fn = ath11k_qmi_msg_mem_ready_cb,
},
{
.type = QMI_INDICATION,
.msg_id = QMI_WLFW_FW_READY_IND_V01,
.ei = qmi_wlanfw_fw_ready_ind_msg_v01_ei,
.decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01),
.fn = ath11k_qmi_msg_fw_ready_cb,
},
{
.type = QMI_INDICATION,
.msg_id = QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01,
.ei = qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei,
.decoded_size =
sizeof(struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01),
.fn = ath11k_qmi_msg_cold_boot_cal_done_cb,
},
{
.type = QMI_INDICATION,
.msg_id = QMI_WLFW_FW_INIT_DONE_IND_V01,
.ei = qmi_wlfw_fw_init_done_ind_msg_v01_ei,
.decoded_size =
sizeof(struct qmi_wlfw_fw_init_done_ind_msg_v01),
.fn = ath11k_qmi_msg_fw_init_done_cb,
},
/* end of list */
{},
};
static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
struct qmi_service *service)
{
struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
struct ath11k_base *ab = qmi->ab;
struct sockaddr_qrtr *sq = &qmi->sq;
int ret;
sq->sq_family = AF_QIPCRTR;
sq->sq_node = service->node;
sq->sq_port = service->port;
ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)sq,
sizeof(*sq), 0);
if (ret) {
ath11k_warn(ab, "failed to connect to qmi remote service: %d\n", ret);
return ret;
}
ath11k_dbg(ab, ATH11K_DBG_QMI, "wifi fw qmi service connected\n");
ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_SERVER_ARRIVE, NULL);
return ret;
}
static void ath11k_qmi_ops_del_server(struct qmi_handle *qmi_hdl,
struct qmi_service *service)
{
struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
struct ath11k_base *ab = qmi->ab;
ath11k_dbg(ab, ATH11K_DBG_QMI, "wifi fw del server\n");
ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_SERVER_EXIT, NULL);
}
static const struct qmi_ops ath11k_qmi_ops = {
.new_server = ath11k_qmi_ops_new_server,
.del_server = ath11k_qmi_ops_del_server,
};
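/* Worker that drains qmi->event_list. Note the locking pattern below:
 * event_lock is dropped for the duration of each event handler so the
 * handlers may sleep (firmware downloads, QMI transactions), and is
 * re-taken only to pop the next entry. If the device is unregistering,
 * the worker frees the current event and bails out early.
 */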
static void ath11k_qmi_driver_event_work(struct work_struct *work)
{
struct ath11k_qmi *qmi = container_of(work, struct ath11k_qmi,
event_work);
struct ath11k_qmi_driver_event *event;
struct ath11k_base *ab = qmi->ab;
int ret;
spin_lock(&qmi->event_lock);
while (!list_empty(&qmi->event_list)) {
event = list_first_entry(&qmi->event_list,
struct ath11k_qmi_driver_event, list);
list_del(&event->list);
spin_unlock(&qmi->event_lock);
if (test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags)) {
kfree(event);
return;
}
switch (event->type) {
case ATH11K_QMI_EVENT_SERVER_ARRIVE:
ret = ath11k_qmi_event_server_arrive(qmi);
if (ret < 0)
set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
break;
case ATH11K_QMI_EVENT_SERVER_EXIT:
set_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
set_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
if (!ab->is_reset)
ath11k_core_pre_reconfigure_recovery(ab);
break;
case ATH11K_QMI_EVENT_REQUEST_MEM:
ret = ath11k_qmi_event_mem_request(qmi);
if (ret < 0)
set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
break;
case ATH11K_QMI_EVENT_FW_MEM_READY:
ret = ath11k_qmi_event_load_bdf(qmi);
if (ret < 0) {
set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
break;
}
ret = ath11k_qmi_wlanfw_m3_info_send(ab);
if (ret < 0) {
ath11k_warn(ab,
"failed to send qmi m3 info req: %d\n", ret);
set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
}
break;
case ATH11K_QMI_EVENT_FW_INIT_DONE:
clear_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
ath11k_hal_dump_srng_stats(ab);
queue_work(ab->workqueue, &ab->restart_work);
break;
}
if (ab->qmi.cal_done == 0 &&
ath11k_core_coldboot_cal_support(ab)) {
ath11k_qmi_process_coldboot_calibration(ab);
} else {
clear_bit(ATH11K_FLAG_CRASH_FLUSH,
&ab->dev_flags);
clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
ret = ath11k_core_qmi_firmware_ready(ab);
if (ret) {
set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
break;
}
set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags);
}
break;
case ATH11K_QMI_EVENT_FW_READY:
			/* For targets requiring a FW restart upon cold
			 * boot completion, there is no need to process
			 * FW ready; such targets will receive the FW init
			 * done message after the FW restart.
			 */
if (ab->hw_params.cbcal_restart_fw)
break;
clear_bit(ATH11K_FLAG_CRASH_FLUSH,
&ab->dev_flags);
clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
ath11k_core_qmi_firmware_ready(ab);
set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags);
break;
case ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE:
break;
default:
ath11k_warn(ab, "invalid qmi event type: %d", event->type);
break;
}
kfree(event);
spin_lock(&qmi->event_lock);
}
spin_unlock(&qmi->event_lock);
}
int ath11k_qmi_init_service(struct ath11k_base *ab)
{
int ret;
memset(&ab->qmi.target, 0, sizeof(struct target_info));
memset(&ab->qmi.target_mem, 0, sizeof(struct target_mem_chunk));
ab->qmi.ab = ab;
ab->qmi.target_mem_mode = ab->hw_params.fw_mem_mode;
ret = qmi_handle_init(&ab->qmi.handle, ATH11K_QMI_RESP_LEN_MAX,
&ath11k_qmi_ops, ath11k_qmi_msg_handlers);
if (ret < 0) {
ath11k_warn(ab, "failed to initialize qmi handle: %d\n", ret);
return ret;
}
ab->qmi.event_wq = alloc_ordered_workqueue("ath11k_qmi_driver_event", 0);
if (!ab->qmi.event_wq) {
ath11k_err(ab, "failed to allocate workqueue\n");
return -EFAULT;
}
INIT_LIST_HEAD(&ab->qmi.event_list);
spin_lock_init(&ab->qmi.event_lock);
INIT_WORK(&ab->qmi.event_work, ath11k_qmi_driver_event_work);
ret = qmi_add_lookup(&ab->qmi.handle, ATH11K_QMI_WLFW_SERVICE_ID_V01,
ATH11K_QMI_WLFW_SERVICE_VERS_V01,
ab->qmi.service_ins_id);
if (ret < 0) {
ath11k_warn(ab, "failed to add qmi lookup: %d\n", ret);
destroy_workqueue(ab->qmi.event_wq);
return ret;
}
return ret;
}
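/* Teardown mirrors ath11k_qmi_init_service(): release the QMI handle first so
 * no new server/indication events can be posted, cancel any in-flight event
 * work before destroying the ordered workqueue, then free the M3 firmware
 * buffer and the target memory chunks.
 */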
void ath11k_qmi_deinit_service(struct ath11k_base *ab)
{
qmi_handle_release(&ab->qmi.handle);
cancel_work_sync(&ab->qmi.event_work);
destroy_workqueue(ab->qmi.event_wq);
ath11k_qmi_m3_free(ab);
ath11k_qmi_free_target_mem_chunk(ab);
}
EXPORT_SYMBOL(ath11k_qmi_deinit_service);
void ath11k_qmi_free_resource(struct ath11k_base *ab)
{
ath11k_qmi_free_target_mem_chunk(ab);
ath11k_qmi_m3_free(ab);
}
|
linux-master
|
drivers/net/wireless/ath/ath11k/qmi.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include "core.h"
#include "ce.h"
#include "hif.h"
#include "hal.h"
#include "hw.h"
/* Map from pdev index to hw mac index. On IPQ8074 the second and third pdevs
 * map to hw MACs 2 and 1 respectively, hence the swap in the helper below.
 */
static u8 ath11k_hw_ipq8074_mac_from_pdev_id(int pdev_idx)
{
switch (pdev_idx) {
case 0:
return 0;
case 1:
return 2;
case 2:
return 1;
default:
return ATH11K_INVALID_HW_MAC_ID;
}
}
static u8 ath11k_hw_ipq6018_mac_from_pdev_id(int pdev_idx)
{
return pdev_idx;
}
static void ath11k_hw_ipq8074_tx_mesh_enable(struct ath11k_base *ab,
struct hal_tcl_data_cmd *tcl_cmd)
{
tcl_cmd->info2 |= FIELD_PREP(HAL_IPQ8074_TCL_DATA_CMD_INFO2_MESH_ENABLE,
true);
}
static void ath11k_hw_qcn9074_tx_mesh_enable(struct ath11k_base *ab,
struct hal_tcl_data_cmd *tcl_cmd)
{
tcl_cmd->info3 |= FIELD_PREP(HAL_QCN9074_TCL_DATA_CMD_INFO3_MESH_ENABLE,
true);
}
static void ath11k_hw_wcn6855_tx_mesh_enable(struct ath11k_base *ab,
struct hal_tcl_data_cmd *tcl_cmd)
{
tcl_cmd->info3 |= FIELD_PREP(HAL_QCN9074_TCL_DATA_CMD_INFO3_MESH_ENABLE,
true);
}
static void ath11k_init_wmi_config_qca6390(struct ath11k_base *ab,
struct target_resource_config *config)
{
config->num_vdevs = 4;
config->num_peers = 16;
config->num_tids = 32;
config->num_offload_peers = 3;
config->num_offload_reorder_buffs = 3;
config->num_peer_keys = TARGET_NUM_PEER_KEYS;
config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
config->num_mcast_groups = 0;
config->num_mcast_table_elems = 0;
config->mcast2ucast_mode = 0;
config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
config->num_wds_entries = 0;
config->dma_burst_size = 0;
config->rx_skip_defrag_timeout_dup_detection_check = 0;
config->vow_config = TARGET_VOW_CONFIG;
config->gtk_offload_max_vdev = 2;
config->num_msdu_desc = 0x400;
config->beacon_tx_offload_max_vdev = 2;
config->rx_batchmode = TARGET_RX_BATCHMODE;
config->peer_map_unmap_v2_support = 0;
config->use_pdev_id = 1;
config->max_frag_entries = 0xa;
config->num_tdls_vdevs = 0x1;
config->num_tdls_conn_table_entries = 8;
config->beacon_tx_offload_max_vdev = 0x2;
config->num_multicast_filter_entries = 0x20;
config->num_wow_filters = 0x16;
config->num_keep_alive_pattern = 0;
config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
}
static void ath11k_hw_ipq8074_reo_setup(struct ath11k_base *ab)
{
u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
u32 val;
/* Each hash entry uses three bits to map to a particular ring. */
u32 ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
HAL_HASH_ROUTING_RING_SW2 << 3 |
HAL_HASH_ROUTING_RING_SW3 << 6 |
HAL_HASH_ROUTING_RING_SW4 << 9 |
HAL_HASH_ROUTING_RING_SW1 << 12 |
HAL_HASH_ROUTING_RING_SW2 << 15 |
HAL_HASH_ROUTING_RING_SW3 << 18 |
HAL_HASH_ROUTING_RING_SW4 << 21;
val = ath11k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
HAL_SRNG_RING_ID_REO2SW1) |
FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
ring_hash_map));
ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
ring_hash_map));
ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
ring_hash_map));
ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
ring_hash_map));
}
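/* The identical SW1..SW4 round-robin map is programmed into all four
 * DEST_RING_CTRL_IX registers above, so every REO hash bucket is spread
 * evenly across the four SW destination rings.
 */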
static void ath11k_init_wmi_config_ipq8074(struct ath11k_base *ab,
struct target_resource_config *config)
{
config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS(ab);
if (ab->num_radios == 2) {
config->num_peers = TARGET_NUM_PEERS(ab, DBS);
config->num_tids = TARGET_NUM_TIDS(ab, DBS);
} else if (ab->num_radios == 3) {
config->num_peers = TARGET_NUM_PEERS(ab, DBS_SBS);
config->num_tids = TARGET_NUM_TIDS(ab, DBS_SBS);
} else {
/* Control should not reach here */
config->num_peers = TARGET_NUM_PEERS(ab, SINGLE);
config->num_tids = TARGET_NUM_TIDS(ab, SINGLE);
}
config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
config->num_peer_keys = TARGET_NUM_PEER_KEYS;
config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
else
config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
config->dma_burst_size = TARGET_DMA_BURST_SIZE;
config->rx_skip_defrag_timeout_dup_detection_check =
TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
config->vow_config = TARGET_VOW_CONFIG;
config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
config->rx_batchmode = TARGET_RX_BATCHMODE;
config->peer_map_unmap_v2_support = 1;
config->twt_ap_pdev_count = ab->num_radios;
config->twt_ap_sta_count = 1000;
config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
config->flag1 |= WMI_RSRC_CFG_FLAG1_ACK_RSSI;
config->ema_max_vap_cnt = ab->num_radios;
config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;
}
static int ath11k_hw_mac_id_to_pdev_id_ipq8074(struct ath11k_hw_params *hw,
int mac_id)
{
return mac_id;
}
static int ath11k_hw_mac_id_to_srng_id_ipq8074(struct ath11k_hw_params *hw,
int mac_id)
{
return 0;
}
static int ath11k_hw_mac_id_to_pdev_id_qca6390(struct ath11k_hw_params *hw,
int mac_id)
{
return 0;
}
static int ath11k_hw_mac_id_to_srng_id_qca6390(struct ath11k_hw_params *hw,
int mac_id)
{
return mac_id;
}
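/* The four helpers above capture the two mapping schemes in use: on the
 * multi-pdev chips (IPQ8074 family) mac_id maps 1:1 to pdev_id and all MACs
 * share srng 0, while on the single-pdev chips (QCA6390 family) there is only
 * pdev 0 and mac_id selects the srng instead.
 */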
static bool ath11k_hw_ipq8074_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU,
__le32_to_cpu(desc->u.ipq8074.msdu_end.info2));
}
static bool ath11k_hw_ipq8074_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU,
__le32_to_cpu(desc->u.ipq8074.msdu_end.info2));
}
static u8 ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
__le32_to_cpu(desc->u.ipq8074.msdu_end.info2));
}
static u8 *ath11k_hw_ipq8074_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
{
return desc->u.ipq8074.hdr_status;
}
static bool ath11k_hw_ipq8074_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
{
return __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1) &
RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID;
}
static u32 ath11k_hw_ipq8074_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
__le32_to_cpu(desc->u.ipq8074.mpdu_start.info2));
}
static u8 ath11k_hw_ipq8074_rx_desc_get_decap_type(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
__le32_to_cpu(desc->u.ipq8074.msdu_start.info2));
}
static u8 ath11k_hw_ipq8074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
__le32_to_cpu(desc->u.ipq8074.msdu_start.info2));
}
static bool ath11k_hw_ipq8074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
__le32_to_cpu(desc->u.ipq8074.msdu_start.info2));
}
static bool ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
__le32_to_cpu(desc->u.ipq8074.mpdu_start.info1));
}
static bool ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
__le32_to_cpu(desc->u.ipq8074.mpdu_start.info1));
}
static u16 ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
__le32_to_cpu(desc->u.ipq8074.mpdu_start.info1));
}
static u16 ath11k_hw_ipq8074_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
__le32_to_cpu(desc->u.ipq8074.msdu_start.info1));
}
static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO3_SGI,
__le32_to_cpu(desc->u.ipq8074.msdu_start.info3));
}
static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
__le32_to_cpu(desc->u.ipq8074.msdu_start.info3));
}
static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
__le32_to_cpu(desc->u.ipq8074.msdu_start.info3));
}
static u32 ath11k_hw_ipq8074_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
{
return __le32_to_cpu(desc->u.ipq8074.msdu_start.phy_meta_data);
}
static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
__le32_to_cpu(desc->u.ipq8074.msdu_start.info3));
}
static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
__le32_to_cpu(desc->u.ipq8074.msdu_start.info3));
}
static u8 ath11k_hw_ipq8074_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MPDU_START_INFO2_TID,
__le32_to_cpu(desc->u.ipq8074.mpdu_start.info2));
}
static u16 ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
{
return __le16_to_cpu(desc->u.ipq8074.mpdu_start.sw_peer_id);
}
static void ath11k_hw_ipq8074_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
struct hal_rx_desc *ldesc)
{
memcpy((u8 *)&fdesc->u.ipq8074.msdu_end, (u8 *)&ldesc->u.ipq8074.msdu_end,
sizeof(struct rx_msdu_end_ipq8074));
memcpy((u8 *)&fdesc->u.ipq8074.attention, (u8 *)&ldesc->u.ipq8074.attention,
sizeof(struct rx_attention));
memcpy((u8 *)&fdesc->u.ipq8074.mpdu_end, (u8 *)&ldesc->u.ipq8074.mpdu_end,
sizeof(struct rx_mpdu_end));
}
static u32 ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
{
return FIELD_GET(HAL_TLV_HDR_TAG,
__le32_to_cpu(desc->u.ipq8074.mpdu_start_tag));
}
static u32 ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
{
return __le16_to_cpu(desc->u.ipq8074.mpdu_start.phy_ppdu_id);
}
static void ath11k_hw_ipq8074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
{
u32 info = __le32_to_cpu(desc->u.ipq8074.msdu_start.info1);
info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
desc->u.ipq8074.msdu_start.info1 = __cpu_to_le32(info);
}
static bool ath11k_hw_ipq8074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
{
return __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1) &
RX_MPDU_START_INFO1_MAC_ADDR2_VALID;
}
static u8 *ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
{
return desc->u.ipq8074.mpdu_start.addr2;
}
static
struct rx_attention *ath11k_hw_ipq8074_rx_desc_get_attention(struct hal_rx_desc *desc)
{
return &desc->u.ipq8074.attention;
}
static u8 *ath11k_hw_ipq8074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
{
return &desc->u.ipq8074.msdu_payload[0];
}
static bool ath11k_hw_qcn9074_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MSDU_END_INFO4_FIRST_MSDU,
__le16_to_cpu(desc->u.qcn9074.msdu_end.info4));
}
static bool ath11k_hw_qcn9074_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MSDU_END_INFO4_LAST_MSDU,
__le16_to_cpu(desc->u.qcn9074.msdu_end.info4));
}
static u8 ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_END_INFO4_L3_HDR_PADDING,
__le16_to_cpu(desc->u.qcn9074.msdu_end.info4));
}
static u8 *ath11k_hw_qcn9074_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
{
return desc->u.qcn9074.hdr_status;
}
static bool ath11k_hw_qcn9074_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
{
return __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11) &
RX_MPDU_START_INFO11_ENCRYPT_INFO_VALID;
}
static u32 ath11k_hw_qcn9074_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MPDU_START_INFO9_ENC_TYPE,
__le32_to_cpu(desc->u.qcn9074.mpdu_start.info9));
}
static u8 ath11k_hw_qcn9074_rx_desc_get_decap_type(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
__le32_to_cpu(desc->u.qcn9074.msdu_start.info2));
}
static u8 ath11k_hw_qcn9074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
__le32_to_cpu(desc->u.qcn9074.msdu_start.info2));
}
static bool ath11k_hw_qcn9074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
__le32_to_cpu(desc->u.qcn9074.msdu_start.info2));
}
static bool ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_CTRL_VALID,
__le32_to_cpu(desc->u.qcn9074.mpdu_start.info11));
}
static bool ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_FCTRL_VALID,
__le32_to_cpu(desc->u.qcn9074.mpdu_start.info11));
}
static u16 ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_NUM,
__le32_to_cpu(desc->u.qcn9074.mpdu_start.info11));
}
static u16 ath11k_hw_qcn9074_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
__le32_to_cpu(desc->u.qcn9074.msdu_start.info1));
}
static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO3_SGI,
__le32_to_cpu(desc->u.qcn9074.msdu_start.info3));
}
static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
__le32_to_cpu(desc->u.qcn9074.msdu_start.info3));
}
static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
__le32_to_cpu(desc->u.qcn9074.msdu_start.info3));
}
static u32 ath11k_hw_qcn9074_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
{
return __le32_to_cpu(desc->u.qcn9074.msdu_start.phy_meta_data);
}
static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
__le32_to_cpu(desc->u.qcn9074.msdu_start.info3));
}
static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
__le32_to_cpu(desc->u.qcn9074.msdu_start.info3));
}
static u8 ath11k_hw_qcn9074_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MPDU_START_INFO9_TID,
__le32_to_cpu(desc->u.qcn9074.mpdu_start.info9));
}
static u16 ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
{
return __le16_to_cpu(desc->u.qcn9074.mpdu_start.sw_peer_id);
}
static void ath11k_hw_qcn9074_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
struct hal_rx_desc *ldesc)
{
memcpy((u8 *)&fdesc->u.qcn9074.msdu_end, (u8 *)&ldesc->u.qcn9074.msdu_end,
sizeof(struct rx_msdu_end_qcn9074));
memcpy((u8 *)&fdesc->u.qcn9074.attention, (u8 *)&ldesc->u.qcn9074.attention,
sizeof(struct rx_attention));
memcpy((u8 *)&fdesc->u.qcn9074.mpdu_end, (u8 *)&ldesc->u.qcn9074.mpdu_end,
sizeof(struct rx_mpdu_end));
}
static u32 ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
{
return FIELD_GET(HAL_TLV_HDR_TAG,
__le32_to_cpu(desc->u.qcn9074.mpdu_start_tag));
}
static u32 ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
{
return __le16_to_cpu(desc->u.qcn9074.mpdu_start.phy_ppdu_id);
}
static void ath11k_hw_qcn9074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
{
u32 info = __le32_to_cpu(desc->u.qcn9074.msdu_start.info1);
info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
desc->u.qcn9074.msdu_start.info1 = __cpu_to_le32(info);
}
static
struct rx_attention *ath11k_hw_qcn9074_rx_desc_get_attention(struct hal_rx_desc *desc)
{
return &desc->u.qcn9074.attention;
}
static u8 *ath11k_hw_qcn9074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
{
return &desc->u.qcn9074.msdu_payload[0];
}
static bool ath11k_hw_ipq9074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
{
return __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11) &
RX_MPDU_START_INFO11_MAC_ADDR2_VALID;
}
static u8 *ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
{
return desc->u.qcn9074.mpdu_start.addr2;
}
static bool ath11k_hw_wcn6855_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU_WCN6855,
__le32_to_cpu(desc->u.wcn6855.msdu_end.info2));
}
static bool ath11k_hw_wcn6855_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU_WCN6855,
__le32_to_cpu(desc->u.wcn6855.msdu_end.info2));
}
static u8 ath11k_hw_wcn6855_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
__le32_to_cpu(desc->u.wcn6855.msdu_end.info2));
}
static u8 *ath11k_hw_wcn6855_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
{
return desc->u.wcn6855.hdr_status;
}
static bool ath11k_hw_wcn6855_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
{
return __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1) &
RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID;
}
static u32 ath11k_hw_wcn6855_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
__le32_to_cpu(desc->u.wcn6855.mpdu_start.info2));
}
static u8 ath11k_hw_wcn6855_rx_desc_get_decap_type(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
__le32_to_cpu(desc->u.wcn6855.msdu_start.info2));
}
static u8 ath11k_hw_wcn6855_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
__le32_to_cpu(desc->u.wcn6855.msdu_start.info2));
}
static bool ath11k_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
__le32_to_cpu(desc->u.wcn6855.mpdu_start.info1));
}
static bool ath11k_hw_wcn6855_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
__le32_to_cpu(desc->u.wcn6855.mpdu_start.info1));
}
static u16 ath11k_hw_wcn6855_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
__le32_to_cpu(desc->u.wcn6855.mpdu_start.info1));
}
static u16 ath11k_hw_wcn6855_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
__le32_to_cpu(desc->u.wcn6855.msdu_start.info1));
}
static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO3_SGI,
__le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
}
static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
__le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
}
static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
__le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
}
static u32 ath11k_hw_wcn6855_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
{
return __le32_to_cpu(desc->u.wcn6855.msdu_start.phy_meta_data);
}
static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
__le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
}
static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
__le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
}
static u8 ath11k_hw_wcn6855_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MPDU_START_INFO2_TID_WCN6855,
__le32_to_cpu(desc->u.wcn6855.mpdu_start.info2));
}
static u16 ath11k_hw_wcn6855_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
{
return __le16_to_cpu(desc->u.wcn6855.mpdu_start.sw_peer_id);
}
static void ath11k_hw_wcn6855_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
struct hal_rx_desc *ldesc)
{
memcpy((u8 *)&fdesc->u.wcn6855.msdu_end, (u8 *)&ldesc->u.wcn6855.msdu_end,
sizeof(struct rx_msdu_end_wcn6855));
memcpy((u8 *)&fdesc->u.wcn6855.attention, (u8 *)&ldesc->u.wcn6855.attention,
sizeof(struct rx_attention));
memcpy((u8 *)&fdesc->u.wcn6855.mpdu_end, (u8 *)&ldesc->u.wcn6855.mpdu_end,
sizeof(struct rx_mpdu_end));
}
static u32 ath11k_hw_wcn6855_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
{
return FIELD_GET(HAL_TLV_HDR_TAG,
__le32_to_cpu(desc->u.wcn6855.mpdu_start_tag));
}
static u32 ath11k_hw_wcn6855_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
{
return __le16_to_cpu(desc->u.wcn6855.mpdu_start.phy_ppdu_id);
}
static void ath11k_hw_wcn6855_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
{
u32 info = __le32_to_cpu(desc->u.wcn6855.msdu_start.info1);
info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
desc->u.wcn6855.msdu_start.info1 = __cpu_to_le32(info);
}
static
struct rx_attention *ath11k_hw_wcn6855_rx_desc_get_attention(struct hal_rx_desc *desc)
{
return &desc->u.wcn6855.attention;
}
static u8 *ath11k_hw_wcn6855_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
{
return &desc->u.wcn6855.msdu_payload[0];
}
static bool ath11k_hw_wcn6855_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
{
return __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1) &
RX_MPDU_START_INFO1_MAC_ADDR2_VALID;
}
static u8 *ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
{
return desc->u.wcn6855.mpdu_start.addr2;
}
static void ath11k_hw_wcn6855_reo_setup(struct ath11k_base *ab)
{
u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
u32 val;
/* Each hash entry uses four bits to map to a particular ring. */
u32 ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
HAL_HASH_ROUTING_RING_SW2 << 4 |
HAL_HASH_ROUTING_RING_SW3 << 8 |
HAL_HASH_ROUTING_RING_SW4 << 12 |
HAL_HASH_ROUTING_RING_SW1 << 16 |
HAL_HASH_ROUTING_RING_SW2 << 20 |
HAL_HASH_ROUTING_RING_SW3 << 24 |
HAL_HASH_ROUTING_RING_SW4 << 28;
val = ath11k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
val = ath11k_hif_read32(ab, reo_base + HAL_REO1_MISC_CTL(ab));
val &= ~HAL_REO1_MISC_CTL_FRAGMENT_DST_RING;
val |= FIELD_PREP(HAL_REO1_MISC_CTL_FRAGMENT_DST_RING, HAL_SRNG_RING_ID_REO2SW1);
ath11k_hif_write32(ab, reo_base + HAL_REO1_MISC_CTL(ab), val);
ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
ring_hash_map);
ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
ring_hash_map);
}
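/* Unlike the IPQ8074 variant above, WCN6855 selects the fragment destination
 * ring via HAL_REO1_MISC_CTL rather than HAL_REO1_GEN_ENABLE, and programs
 * only DEST_RING_CTRL_IX_2/3 with the (four-bits-per-entry) hash map.
 */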
static void ath11k_hw_ipq5018_reo_setup(struct ath11k_base *ab)
{
u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
u32 val;
	/* Each hash entry uses four bits to map to a particular ring. */
u32 ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
HAL_HASH_ROUTING_RING_SW2 << 4 |
HAL_HASH_ROUTING_RING_SW3 << 8 |
HAL_HASH_ROUTING_RING_SW4 << 12 |
HAL_HASH_ROUTING_RING_SW1 << 16 |
HAL_HASH_ROUTING_RING_SW2 << 20 |
HAL_HASH_ROUTING_RING_SW3 << 24 |
HAL_HASH_ROUTING_RING_SW4 << 28;
val = ath11k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
HAL_SRNG_RING_ID_REO2SW1) |
FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
ring_hash_map);
ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
ring_hash_map);
ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
ring_hash_map);
ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
ring_hash_map);
}
static u16
ath11k_hw_ipq8074_mpdu_info_get_peerid(struct hal_rx_mpdu_info *mpdu_info)
{
	return FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID,
			 __le32_to_cpu(mpdu_info->u.ipq8074.info0));
}
static u16
ath11k_hw_qcn9074_mpdu_info_get_peerid(struct hal_rx_mpdu_info *mpdu_info)
{
	return FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID,
			 __le32_to_cpu(mpdu_info->u.qcn9074.info0));
}
static u16
ath11k_hw_wcn6855_mpdu_info_get_peerid(struct hal_rx_mpdu_info *mpdu_info)
{
	return FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID_WCN6855,
			 __le32_to_cpu(mpdu_info->u.wcn6855.info0));
}
static bool ath11k_hw_wcn6855_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
__le32_to_cpu(desc->u.wcn6855.msdu_start.info2));
}
static u32 ath11k_hw_ipq8074_get_tcl_ring_selector(struct sk_buff *skb)
{
	/* Let the default ring selection be based on the current processor
	 * number, where one of the 3 TCL rings is selected based on
	 * smp_processor_id(). In case that ring is full/busy, we resort
	 * to the other available rings.
	 * If all rings are full, we drop the packet.
	 *
	 * TODO: Add throttling logic when all rings are full
	 */
return smp_processor_id();
}
static u32 ath11k_hw_wcn6750_get_tcl_ring_selector(struct sk_buff *skb)
{
	/* Select the TCL ring based on the flow hash of the SKB instead of
	 * the CPU ID. Since applications pumping traffic can be scheduled on
	 * multiple CPUs, packets of the same flow could otherwise end up on
	 * different TCL rings, which can sometimes result in out-of-order
	 * arrival of the packets at the receiver.
	 */
return skb_get_hash(skb);
}
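/* Either selector returns an unbounded u32; the caller is presumably expected
 * to reduce it modulo the number of TCL rings actually enabled, e.g. (a
 * hypothetical call site):
 *
 *	ring_id = ab->hw_params.hw_ops->get_ring_selector(skb) %
 *		  ab->hw_params.max_tx_ring;
 */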
const struct ath11k_hw_ops ipq8074_ops = {
.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
.wmi_init_config = ath11k_init_wmi_config_ipq8074,
.mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
.mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
.tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable,
.rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes,
.rx_desc_get_hdr_status = ath11k_hw_ipq8074_rx_desc_get_hdr_status,
.rx_desc_encrypt_valid = ath11k_hw_ipq8074_rx_desc_encrypt_valid,
.rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
.rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
.rx_desc_get_msdu_len = ath11k_hw_ipq8074_rx_desc_get_msdu_len,
.rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi,
.rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
.rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
.rx_desc_get_msdu_freq = ath11k_hw_ipq8074_rx_desc_get_msdu_freq,
.rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
.rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss,
.rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid,
.rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
.rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end,
.rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
.rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
.rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_ipq8074_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops ipq6018_ops = {
.get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id,
.wmi_init_config = ath11k_init_wmi_config_ipq8074,
.mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
.mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
.tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable,
.rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes,
.rx_desc_get_hdr_status = ath11k_hw_ipq8074_rx_desc_get_hdr_status,
.rx_desc_encrypt_valid = ath11k_hw_ipq8074_rx_desc_encrypt_valid,
.rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
.rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
.rx_desc_get_msdu_len = ath11k_hw_ipq8074_rx_desc_get_msdu_len,
.rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi,
.rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
.rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
.rx_desc_get_msdu_freq = ath11k_hw_ipq8074_rx_desc_get_msdu_freq,
.rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
.rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss,
.rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid,
.rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
.rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end,
.rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
.rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
.rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_ipq8074_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops qca6390_ops = {
.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
.wmi_init_config = ath11k_init_wmi_config_qca6390,
.mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390,
.mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390,
.tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable,
.rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes,
.rx_desc_get_hdr_status = ath11k_hw_ipq8074_rx_desc_get_hdr_status,
.rx_desc_encrypt_valid = ath11k_hw_ipq8074_rx_desc_encrypt_valid,
.rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
.rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
.rx_desc_get_msdu_len = ath11k_hw_ipq8074_rx_desc_get_msdu_len,
.rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi,
.rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
.rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
.rx_desc_get_msdu_freq = ath11k_hw_ipq8074_rx_desc_get_msdu_freq,
.rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
.rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss,
.rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid,
.rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
.rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end,
.rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
.rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
.rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_ipq8074_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops qcn9074_ops = {
.get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id,
.wmi_init_config = ath11k_init_wmi_config_ipq8074,
.mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
.mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
.tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
.rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes,
.rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status,
.rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid,
.rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
.rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
.rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len,
.rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
.rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
.rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
.rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq,
.rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
.rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
.rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
.rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
.rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
.rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
.rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
.rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_ipq8074_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_qcn9074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops wcn6855_ops = {
.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
.wmi_init_config = ath11k_init_wmi_config_qca6390,
.mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390,
.mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390,
.tx_mesh_enable = ath11k_hw_wcn6855_tx_mesh_enable,
.rx_desc_get_first_msdu = ath11k_hw_wcn6855_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath11k_hw_wcn6855_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath11k_hw_wcn6855_rx_desc_get_l3_pad_bytes,
.rx_desc_get_hdr_status = ath11k_hw_wcn6855_rx_desc_get_hdr_status,
.rx_desc_encrypt_valid = ath11k_hw_wcn6855_rx_desc_encrypt_valid,
.rx_desc_get_encrypt_type = ath11k_hw_wcn6855_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_wcn6855_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_wcn6855_rx_desc_get_mesh_ctl,
.rx_desc_get_ldpc_support = ath11k_hw_wcn6855_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_wcn6855_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_seq_no,
.rx_desc_get_msdu_len = ath11k_hw_wcn6855_rx_desc_get_msdu_len,
.rx_desc_get_msdu_sgi = ath11k_hw_wcn6855_rx_desc_get_msdu_sgi,
.rx_desc_get_msdu_rate_mcs = ath11k_hw_wcn6855_rx_desc_get_msdu_rate_mcs,
.rx_desc_get_msdu_rx_bw = ath11k_hw_wcn6855_rx_desc_get_msdu_rx_bw,
.rx_desc_get_msdu_freq = ath11k_hw_wcn6855_rx_desc_get_msdu_freq,
.rx_desc_get_msdu_pkt_type = ath11k_hw_wcn6855_rx_desc_get_msdu_pkt_type,
.rx_desc_get_msdu_nss = ath11k_hw_wcn6855_rx_desc_get_msdu_nss,
.rx_desc_get_mpdu_tid = ath11k_hw_wcn6855_rx_desc_get_mpdu_tid,
.rx_desc_get_mpdu_peer_id = ath11k_hw_wcn6855_rx_desc_get_mpdu_peer_id,
.rx_desc_copy_attn_end_tlv = ath11k_hw_wcn6855_rx_desc_copy_attn_end,
.rx_desc_get_mpdu_start_tag = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_tag,
.rx_desc_get_mpdu_ppdu_id = ath11k_hw_wcn6855_rx_desc_get_mpdu_ppdu_id,
.rx_desc_set_msdu_len = ath11k_hw_wcn6855_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_wcn6855_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_wcn6855_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_wcn6855_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_wcn6855_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_wcn6855_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2,
.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops wcn6750_ops = {
.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
.wmi_init_config = ath11k_init_wmi_config_qca6390,
.mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390,
.mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390,
.tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
.rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes,
.rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status,
.rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid,
.rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
.rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
.rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len,
.rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
.rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
.rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
.rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq,
.rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
.rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
.rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
.rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
.rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
.rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
.rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
.rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_wcn6855_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
.get_ring_selector = ath11k_hw_wcn6750_get_tcl_ring_selector,
};
/* The IPQ5018 hw ops are the same as QCN9074's except for the dest ring remap */
const struct ath11k_hw_ops ipq5018_ops = {
.get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id,
.wmi_init_config = ath11k_init_wmi_config_ipq8074,
.mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
.mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
.tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
.rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes,
.rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status,
.rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid,
.rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
.rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
.rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len,
.rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
.rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
.rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
.rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq,
.rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
.rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
.rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
.rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
.rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
.rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
.rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
.rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
.reo_setup = ath11k_hw_ipq5018_reo_setup,
.rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
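/* A usage sketch: the core paths never call the per-chip helpers directly but
 * always dispatch through the ops table attached to the hw params, e.g.
 * (hypothetical call site):
 *
 *	u16 msdu_len = ab->hw_params.hw_ops->rx_desc_get_msdu_len(rx_desc);
 *
 * so supporting a new chip is largely a matter of filling in another table.
 */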
#define ATH11K_TX_RING_MASK_0 BIT(0)
#define ATH11K_TX_RING_MASK_1 BIT(1)
#define ATH11K_TX_RING_MASK_2 BIT(2)
#define ATH11K_TX_RING_MASK_3 BIT(3)
#define ATH11K_TX_RING_MASK_4 BIT(4)
#define ATH11K_RX_RING_MASK_0 0x1
#define ATH11K_RX_RING_MASK_1 0x2
#define ATH11K_RX_RING_MASK_2 0x4
#define ATH11K_RX_RING_MASK_3 0x8
#define ATH11K_RX_ERR_RING_MASK_0 0x1
#define ATH11K_RX_WBM_REL_RING_MASK_0 0x1
#define ATH11K_REO_STATUS_RING_MASK_0 0x1
#define ATH11K_RXDMA2HOST_RING_MASK_0 0x1
#define ATH11K_RXDMA2HOST_RING_MASK_1 0x2
#define ATH11K_RXDMA2HOST_RING_MASK_2 0x4
#define ATH11K_HOST2RXDMA_RING_MASK_0 0x1
#define ATH11K_HOST2RXDMA_RING_MASK_1 0x2
#define ATH11K_HOST2RXDMA_RING_MASK_2 0x4
#define ATH11K_RX_MON_STATUS_RING_MASK_0 0x1
#define ATH11K_RX_MON_STATUS_RING_MASK_1 0x2
#define ATH11K_RX_MON_STATUS_RING_MASK_2 0x4
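/* Each per-ring-type array in the ring mask tables below is indexed by host
 * EXT interrupt group; a non-zero mask at index i means group i services the
 * corresponding ring(s), which is why e.g. the rx masks start only after a
 * run of zero-filled groups (an interpretation based on the layout here, not
 * spelled out in this file).
 */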
const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074 = {
.tx = {
ATH11K_TX_RING_MASK_0,
ATH11K_TX_RING_MASK_1,
ATH11K_TX_RING_MASK_2,
},
.rx_mon_status = {
0, 0, 0, 0,
ATH11K_RX_MON_STATUS_RING_MASK_0,
ATH11K_RX_MON_STATUS_RING_MASK_1,
ATH11K_RX_MON_STATUS_RING_MASK_2,
},
.rx = {
0, 0, 0, 0, 0, 0, 0,
ATH11K_RX_RING_MASK_0,
ATH11K_RX_RING_MASK_1,
ATH11K_RX_RING_MASK_2,
ATH11K_RX_RING_MASK_3,
},
.rx_err = {
ATH11K_RX_ERR_RING_MASK_0,
},
.rx_wbm_rel = {
ATH11K_RX_WBM_REL_RING_MASK_0,
},
.reo_status = {
0, 0, 0,
ATH11K_REO_STATUS_RING_MASK_0,
},
.rxdma2host = {
ATH11K_RXDMA2HOST_RING_MASK_0,
ATH11K_RXDMA2HOST_RING_MASK_1,
ATH11K_RXDMA2HOST_RING_MASK_2,
},
.host2rxdma = {
ATH11K_HOST2RXDMA_RING_MASK_0,
ATH11K_HOST2RXDMA_RING_MASK_1,
ATH11K_HOST2RXDMA_RING_MASK_2,
},
};
const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390 = {
.tx = {
ATH11K_TX_RING_MASK_0,
},
.rx_mon_status = {
0, 0, 0, 0,
ATH11K_RX_MON_STATUS_RING_MASK_0,
ATH11K_RX_MON_STATUS_RING_MASK_1,
ATH11K_RX_MON_STATUS_RING_MASK_2,
},
.rx = {
0, 0, 0, 0, 0, 0, 0,
ATH11K_RX_RING_MASK_0,
ATH11K_RX_RING_MASK_1,
ATH11K_RX_RING_MASK_2,
ATH11K_RX_RING_MASK_3,
},
.rx_err = {
ATH11K_RX_ERR_RING_MASK_0,
},
.rx_wbm_rel = {
ATH11K_RX_WBM_REL_RING_MASK_0,
},
.reo_status = {
ATH11K_REO_STATUS_RING_MASK_0,
},
.rxdma2host = {
ATH11K_RXDMA2HOST_RING_MASK_0,
ATH11K_RXDMA2HOST_RING_MASK_1,
ATH11K_RXDMA2HOST_RING_MASK_2,
},
.host2rxdma = {
},
};
/* Target firmware's Copy Engine configuration. */
const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq8074[] = {
/* CE0: host->target HTC control and raw streams */
{
.pipenum = __cpu_to_le32(0),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE1: target->host HTT + HTC control */
{
.pipenum = __cpu_to_le32(1),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE2: target->host WMI */
{
.pipenum = __cpu_to_le32(2),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE3: host->target WMI */
{
.pipenum = __cpu_to_le32(3),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE4: host->target HTT */
{
.pipenum = __cpu_to_le32(4),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(256),
.nbytes_max = __cpu_to_le32(256),
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
.reserved = __cpu_to_le32(0),
},
/* CE5: target->host Pktlog */
{
.pipenum = __cpu_to_le32(5),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(0),
.reserved = __cpu_to_le32(0),
},
/* CE6: Reserved for target autonomous hif_memcpy */
{
.pipenum = __cpu_to_le32(6),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(65535),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE7 used only by Host */
{
.pipenum = __cpu_to_le32(7),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE8 target->host used only by IPA */
{
.pipenum = __cpu_to_le32(8),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(65535),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE9 host->target HTT */
{
.pipenum = __cpu_to_le32(9),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE10 target->host HTT */
{
.pipenum = __cpu_to_le32(10),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
.nentries = __cpu_to_le32(0),
.nbytes_max = __cpu_to_le32(0),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE11 Not used */
};
/* Map from service/endpoint to Copy Engine.
* This table is derived from the CE_PCI TABLE, above.
* It is passed to the Target at startup for use by firmware.
*/
const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq8074[] = {
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(3),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(3),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(3),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(3),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(3),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(7),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(9),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(0),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(1),
},
{ /* not used */
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(0),
},
{ /* not used */
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(1),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(4),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(1),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(5),
},
/* (Additions here) */
{ /* terminator entry */ }
};
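/* Illustrative sketch, not part of the driver: resolving the CE pipe for a
 * given service and direction over a table like the one above. The driver's
 * real lookup lives in the bus code and iterates by table length rather than
 * by the firmware-facing terminator entry; svc_to_pipe_lookup and its
 * arguments are hypothetical names.
 */
static int svc_to_pipe_lookup(const struct service_to_pipe *table, size_t n,
			      u32 service_id, u32 pipedir, u8 *pipenum)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (__le32_to_cpu(table[i].service_id) == service_id &&
		    __le32_to_cpu(table[i].pipedir) == pipedir) {
			*pipenum = __le32_to_cpu(table[i].pipenum);
			return 0;
		}
	}
	return -ENOENT;
}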
const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018[] = {
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(3),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(3),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(3),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(3),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(3),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(7),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(0),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(1),
},
{ /* not used */
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(0),
},
{ /* not used */
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(1),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(4),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(1),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(5),
},
/* (Additions here) */
{ /* terminator entry */ }
};
/* Target firmware's Copy Engine configuration. */
const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[] = {
/* CE0: host->target HTC control and raw streams */
{
.pipenum = __cpu_to_le32(0),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE1: target->host HTT + HTC control */
{
.pipenum = __cpu_to_le32(1),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE2: target->host WMI */
{
.pipenum = __cpu_to_le32(2),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE3: host->target WMI */
{
.pipenum = __cpu_to_le32(3),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE4: host->target HTT */
{
.pipenum = __cpu_to_le32(4),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(256),
.nbytes_max = __cpu_to_le32(256),
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
.reserved = __cpu_to_le32(0),
},
/* CE5: target->host Pktlog */
{
.pipenum = __cpu_to_le32(5),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE6: Reserved for target autonomous hif_memcpy */
{
.pipenum = __cpu_to_le32(6),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(16384),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE7 used only by Host */
{
.pipenum = __cpu_to_le32(7),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
.nentries = __cpu_to_le32(0),
.nbytes_max = __cpu_to_le32(0),
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
.reserved = __cpu_to_le32(0),
},
/* CE8 target->host used only by IPA */
{
.pipenum = __cpu_to_le32(8),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(16384),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE 9, 10, 11 are used by MHI driver */
};
/* Map from service/endpoint to Copy Engine.
* This table is derived from the CE_PCI TABLE, above.
* It is passed to the Target at startup for use by firmware.
*/
const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[] = {
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(3),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(3),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(3),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(3),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(3),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(0),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(4),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(1),
},
/* (Additions here) */
{ /* must be last */
__cpu_to_le32(0),
__cpu_to_le32(0),
__cpu_to_le32(0),
},
};
/* Target firmware's Copy Engine configuration. */
const struct ce_pipe_config ath11k_target_ce_config_wlan_qcn9074[] = {
/* CE0: host->target HTC control and raw streams */
{
.pipenum = __cpu_to_le32(0),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE1: target->host HTT + HTC control */
{
.pipenum = __cpu_to_le32(1),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE2: target->host WMI */
{
.pipenum = __cpu_to_le32(2),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE3: host->target WMI */
{
.pipenum = __cpu_to_le32(3),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE4: host->target HTT */
{
.pipenum = __cpu_to_le32(4),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(256),
.nbytes_max = __cpu_to_le32(256),
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
.reserved = __cpu_to_le32(0),
},
/* CE5: target->host Pktlog */
{
.pipenum = __cpu_to_le32(5),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE6: Reserved for target autonomous hif_memcpy */
{
.pipenum = __cpu_to_le32(6),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(16384),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE7 used only by Host */
{
.pipenum = __cpu_to_le32(7),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
.nentries = __cpu_to_le32(0),
.nbytes_max = __cpu_to_le32(0),
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
.reserved = __cpu_to_le32(0),
},
/* CE8 target->host used only by IPA */
{
.pipenum = __cpu_to_le32(8),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(16384),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE 9, 10, 11 are used by MHI driver */
};
/* Map from service/endpoint to Copy Engine.
* This table is derived from the CE_PCI TABLE, above.
* It is passed to the Target at startup for use by firmware.
*/
const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qcn9074[] = {
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(3),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(3),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(3),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(3),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(3),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(0),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(1),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(0),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(1),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(4),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(1),
},
{
__cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(5),
},
/* (Additions here) */
{ /* must be last */
__cpu_to_le32(0),
__cpu_to_le32(0),
__cpu_to_le32(0),
},
};
const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074 = {
.tx = {
ATH11K_TX_RING_MASK_0,
ATH11K_TX_RING_MASK_1,
ATH11K_TX_RING_MASK_2,
},
.rx_mon_status = {
0, 0, 0,
ATH11K_RX_MON_STATUS_RING_MASK_0,
ATH11K_RX_MON_STATUS_RING_MASK_1,
ATH11K_RX_MON_STATUS_RING_MASK_2,
},
.rx = {
0, 0, 0, 0,
ATH11K_RX_RING_MASK_0,
ATH11K_RX_RING_MASK_1,
ATH11K_RX_RING_MASK_2,
ATH11K_RX_RING_MASK_3,
},
.rx_err = {
0, 0, 0,
ATH11K_RX_ERR_RING_MASK_0,
},
.rx_wbm_rel = {
0, 0, 0,
ATH11K_RX_WBM_REL_RING_MASK_0,
},
.reo_status = {
0, 0, 0,
ATH11K_REO_STATUS_RING_MASK_0,
},
.rxdma2host = {
0, 0, 0,
ATH11K_RXDMA2HOST_RING_MASK_0,
},
.host2rxdma = {
0, 0, 0,
ATH11K_HOST2RXDMA_RING_MASK_0,
},
};
const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750 = {
.tx = {
ATH11K_TX_RING_MASK_0,
0,
ATH11K_TX_RING_MASK_2,
0,
ATH11K_TX_RING_MASK_4,
},
.rx_mon_status = {
0, 0, 0, 0, 0, 0,
ATH11K_RX_MON_STATUS_RING_MASK_0,
},
.rx = {
0, 0, 0, 0, 0, 0, 0,
ATH11K_RX_RING_MASK_0,
ATH11K_RX_RING_MASK_1,
ATH11K_RX_RING_MASK_2,
ATH11K_RX_RING_MASK_3,
},
.rx_err = {
0, ATH11K_RX_ERR_RING_MASK_0,
},
.rx_wbm_rel = {
0, ATH11K_RX_WBM_REL_RING_MASK_0,
},
.reo_status = {
0, ATH11K_REO_STATUS_RING_MASK_0,
},
.rxdma2host = {
ATH11K_RXDMA2HOST_RING_MASK_0,
ATH11K_RXDMA2HOST_RING_MASK_1,
ATH11K_RXDMA2HOST_RING_MASK_2,
},
.host2rxdma = {
},
};
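/* Illustrative sketch, not driver code: each array in the ring masks above is
 * indexed by external IRQ group, and a nonzero entry selects the rings that
 * group services. grp_has_tx is a hypothetical helper;
 * ATH11K_EXT_IRQ_GRP_NUM_MAX is the driver's real group bound.
 */
static bool grp_has_tx(const struct ath11k_hw_ring_mask *mask, int grp)
{
	return grp >= 0 && grp < ATH11K_EXT_IRQ_GRP_NUM_MAX && mask->tx[grp];
}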
/* Target firmware's Copy Engine configuration for IPQ5018 */
const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq5018[] = {
/* CE0: host->target HTC control and raw streams */
{
.pipenum = __cpu_to_le32(0),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE1: target->host HTT + HTC control */
{
.pipenum = __cpu_to_le32(1),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE2: target->host WMI */
{
.pipenum = __cpu_to_le32(2),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE3: host->target WMI */
{
.pipenum = __cpu_to_le32(3),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE4: host->target HTT */
{
.pipenum = __cpu_to_le32(4),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(256),
.nbytes_max = __cpu_to_le32(256),
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
.reserved = __cpu_to_le32(0),
},
/* CE5: target->host Pktlog */
{
.pipenum = __cpu_to_le32(5),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE6: Reserved for target autonomous hif_memcpy */
{
.pipenum = __cpu_to_le32(6),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(16384),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE7 used only by Host */
{
.pipenum = __cpu_to_le32(7),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(0x2000),
.reserved = __cpu_to_le32(0),
},
/* CE8 target->host used only by IPA */
{
.pipenum = __cpu_to_le32(8),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(16384),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
};
/* Map from service/endpoint to Copy Engine for IPQ5018.
* This table is derived from the CE TABLE, above.
* It is passed to the Target at startup for use by firmware.
*/
const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq5018[] = {
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(3),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(3),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(3),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(3),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(3),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(0),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(1),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(0),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(1),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(4),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(1),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(5),
},
/* (Additions here) */
{ /* terminator entry */ }
};
const struct ce_ie_addr ath11k_ce_ie_addr_ipq8074 = {
.ie1_reg_addr = CE_HOST_IE_ADDRESS,
.ie2_reg_addr = CE_HOST_IE_2_ADDRESS,
.ie3_reg_addr = CE_HOST_IE_3_ADDRESS,
};
const struct ce_ie_addr ath11k_ce_ie_addr_ipq5018 = {
.ie1_reg_addr = CE_HOST_IPQ5018_IE_ADDRESS - HAL_IPQ5018_CE_WFSS_REG_BASE,
.ie2_reg_addr = CE_HOST_IPQ5018_IE_2_ADDRESS - HAL_IPQ5018_CE_WFSS_REG_BASE,
.ie3_reg_addr = CE_HOST_IPQ5018_IE_3_ADDRESS - HAL_IPQ5018_CE_WFSS_REG_BASE,
};
const struct ce_remap ath11k_ce_remap_ipq5018 = {
.base = HAL_IPQ5018_CE_WFSS_REG_BASE,
.size = HAL_IPQ5018_CE_SIZE,
};
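/* Illustrative sketch (hypothetical helper): on IPQ5018 the CE registers sit
 * in a remapped window, so the ce_ie_addr offsets above are stored relative
 * to HAL_IPQ5018_CE_WFSS_REG_BASE and the ce_remap descriptor supplies the
 * window base/size to map; the absolute address is simply base + offset.
 */
static u32 ce_reg_absolute_addr(const struct ce_remap *remap, u32 rel_addr)
{
	return remap->base + rel_addr;
}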
const struct ath11k_hw_regs ipq8074_regs = {
/* SW2TCL(x) R0 ring configuration address */
.hal_tcl1_ring_base_lsb = 0x00000510,
.hal_tcl1_ring_base_msb = 0x00000514,
.hal_tcl1_ring_id = 0x00000518,
.hal_tcl1_ring_misc = 0x00000520,
.hal_tcl1_ring_tp_addr_lsb = 0x0000052c,
.hal_tcl1_ring_tp_addr_msb = 0x00000530,
.hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000540,
.hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000544,
.hal_tcl1_ring_msi1_base_lsb = 0x00000558,
.hal_tcl1_ring_msi1_base_msb = 0x0000055c,
.hal_tcl1_ring_msi1_data = 0x00000560,
.hal_tcl2_ring_base_lsb = 0x00000568,
.hal_tcl_ring_base_lsb = 0x00000618,
/* TCL STATUS ring address */
.hal_tcl_status_ring_base_lsb = 0x00000720,
/* REO2SW(x) R0 ring configuration address */
.hal_reo1_ring_base_lsb = 0x0000029c,
.hal_reo1_ring_base_msb = 0x000002a0,
.hal_reo1_ring_id = 0x000002a4,
.hal_reo1_ring_misc = 0x000002ac,
.hal_reo1_ring_hp_addr_lsb = 0x000002b0,
.hal_reo1_ring_hp_addr_msb = 0x000002b4,
.hal_reo1_ring_producer_int_setup = 0x000002c0,
.hal_reo1_ring_msi1_base_lsb = 0x000002e4,
.hal_reo1_ring_msi1_base_msb = 0x000002e8,
.hal_reo1_ring_msi1_data = 0x000002ec,
.hal_reo2_ring_base_lsb = 0x000002f4,
.hal_reo1_aging_thresh_ix_0 = 0x00000564,
.hal_reo1_aging_thresh_ix_1 = 0x00000568,
.hal_reo1_aging_thresh_ix_2 = 0x0000056c,
.hal_reo1_aging_thresh_ix_3 = 0x00000570,
/* REO2SW(x) R2 ring pointers (head/tail) address */
.hal_reo1_ring_hp = 0x00003038,
.hal_reo1_ring_tp = 0x0000303c,
.hal_reo2_ring_hp = 0x00003040,
/* REO2TCL R0 ring configuration address */
.hal_reo_tcl_ring_base_lsb = 0x000003fc,
.hal_reo_tcl_ring_hp = 0x00003058,
/* REO CMD ring address */
.hal_reo_cmd_ring_base_lsb = 0x00000194,
.hal_reo_cmd_ring_hp = 0x00003020,
/* REO status address */
.hal_reo_status_ring_base_lsb = 0x00000504,
.hal_reo_status_hp = 0x00003070,
/* SW2REO ring address */
.hal_sw2reo_ring_base_lsb = 0x000001ec,
.hal_sw2reo_ring_hp = 0x00003028,
/* WCSS relative address */
.hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
.hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
.hal_seq_wcss_umac_ce1_src_reg = 0x00a02000,
.hal_seq_wcss_umac_ce1_dst_reg = 0x00a03000,
/* WBM Idle address */
.hal_wbm_idle_link_ring_base_lsb = 0x00000860,
.hal_wbm_idle_link_ring_misc = 0x00000870,
/* SW2WBM release address */
.hal_wbm_release_ring_base_lsb = 0x000001d8,
/* WBM2SW release address */
.hal_wbm0_release_ring_base_lsb = 0x00000910,
.hal_wbm1_release_ring_base_lsb = 0x00000968,
/* PCIe base address */
.pcie_qserdes_sysclk_en_sel = 0x0,
.pcie_pcs_osc_dtct_config_base = 0x0,
/* Shadow register area */
.hal_shadow_base_addr = 0x0,
/* REO misc control register, not used in IPQ8074 */
.hal_reo1_misc_ctl = 0x0,
};
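/* Illustrative note: these per-SoC offset tables are consumed through
 * ab->hw_params.regs, e.g. ring setup reads
 * ab->hw_params.regs->hal_tcl1_ring_base_lsb rather than a hard-coded
 * address, which lets one HAL implementation serve all supported targets.
 */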
const struct ath11k_hw_regs qca6390_regs = {
/* SW2TCL(x) R0 ring configuration address */
.hal_tcl1_ring_base_lsb = 0x00000684,
.hal_tcl1_ring_base_msb = 0x00000688,
.hal_tcl1_ring_id = 0x0000068c,
.hal_tcl1_ring_misc = 0x00000694,
.hal_tcl1_ring_tp_addr_lsb = 0x000006a0,
.hal_tcl1_ring_tp_addr_msb = 0x000006a4,
.hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006b4,
.hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006b8,
.hal_tcl1_ring_msi1_base_lsb = 0x000006cc,
.hal_tcl1_ring_msi1_base_msb = 0x000006d0,
.hal_tcl1_ring_msi1_data = 0x000006d4,
.hal_tcl2_ring_base_lsb = 0x000006dc,
.hal_tcl_ring_base_lsb = 0x0000078c,
/* TCL STATUS ring address */
.hal_tcl_status_ring_base_lsb = 0x00000894,
/* REO2SW(x) R0 ring configuration address */
.hal_reo1_ring_base_lsb = 0x00000244,
.hal_reo1_ring_base_msb = 0x00000248,
.hal_reo1_ring_id = 0x0000024c,
.hal_reo1_ring_misc = 0x00000254,
.hal_reo1_ring_hp_addr_lsb = 0x00000258,
.hal_reo1_ring_hp_addr_msb = 0x0000025c,
.hal_reo1_ring_producer_int_setup = 0x00000268,
.hal_reo1_ring_msi1_base_lsb = 0x0000028c,
.hal_reo1_ring_msi1_base_msb = 0x00000290,
.hal_reo1_ring_msi1_data = 0x00000294,
.hal_reo2_ring_base_lsb = 0x0000029c,
.hal_reo1_aging_thresh_ix_0 = 0x0000050c,
.hal_reo1_aging_thresh_ix_1 = 0x00000510,
.hal_reo1_aging_thresh_ix_2 = 0x00000514,
.hal_reo1_aging_thresh_ix_3 = 0x00000518,
/* REO2SW(x) R2 ring pointers (head/tail) address */
.hal_reo1_ring_hp = 0x00003030,
.hal_reo1_ring_tp = 0x00003034,
.hal_reo2_ring_hp = 0x00003038,
/* REO2TCL R0 ring configuration address */
.hal_reo_tcl_ring_base_lsb = 0x000003a4,
.hal_reo_tcl_ring_hp = 0x00003050,
/* REO CMD ring address */
.hal_reo_cmd_ring_base_lsb = 0x00000194,
.hal_reo_cmd_ring_hp = 0x00003020,
/* REO status address */
.hal_reo_status_ring_base_lsb = 0x000004ac,
.hal_reo_status_hp = 0x00003068,
/* SW2REO ring address */
.hal_sw2reo_ring_base_lsb = 0x000001ec,
.hal_sw2reo_ring_hp = 0x00003028,
/* WCSS relative address */
.hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
.hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
.hal_seq_wcss_umac_ce1_src_reg = 0x00a02000,
.hal_seq_wcss_umac_ce1_dst_reg = 0x00a03000,
/* WBM Idle address */
.hal_wbm_idle_link_ring_base_lsb = 0x00000860,
.hal_wbm_idle_link_ring_misc = 0x00000870,
/* SW2WBM release address */
.hal_wbm_release_ring_base_lsb = 0x000001d8,
/* WBM2SW release address */
.hal_wbm0_release_ring_base_lsb = 0x00000910,
.hal_wbm1_release_ring_base_lsb = 0x00000968,
/* PCIe base address */
.pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
.pcie_pcs_osc_dtct_config_base = 0x01e0c628,
/* Shadow register area */
.hal_shadow_base_addr = 0x000008fc,
/* REO misc control register, not used in QCA6390 */
.hal_reo1_misc_ctl = 0x0,
};
const struct ath11k_hw_regs qcn9074_regs = {
/* SW2TCL(x) R0 ring configuration address */
.hal_tcl1_ring_base_lsb = 0x000004f0,
.hal_tcl1_ring_base_msb = 0x000004f4,
.hal_tcl1_ring_id = 0x000004f8,
.hal_tcl1_ring_misc = 0x00000500,
.hal_tcl1_ring_tp_addr_lsb = 0x0000050c,
.hal_tcl1_ring_tp_addr_msb = 0x00000510,
.hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000520,
.hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000524,
.hal_tcl1_ring_msi1_base_lsb = 0x00000538,
.hal_tcl1_ring_msi1_base_msb = 0x0000053c,
.hal_tcl1_ring_msi1_data = 0x00000540,
.hal_tcl2_ring_base_lsb = 0x00000548,
.hal_tcl_ring_base_lsb = 0x000005f8,
/* TCL STATUS ring address */
.hal_tcl_status_ring_base_lsb = 0x00000700,
/* REO2SW(x) R0 ring configuration address */
.hal_reo1_ring_base_lsb = 0x0000029c,
.hal_reo1_ring_base_msb = 0x000002a0,
.hal_reo1_ring_id = 0x000002a4,
.hal_reo1_ring_misc = 0x000002ac,
.hal_reo1_ring_hp_addr_lsb = 0x000002b0,
.hal_reo1_ring_hp_addr_msb = 0x000002b4,
.hal_reo1_ring_producer_int_setup = 0x000002c0,
.hal_reo1_ring_msi1_base_lsb = 0x000002e4,
.hal_reo1_ring_msi1_base_msb = 0x000002e8,
.hal_reo1_ring_msi1_data = 0x000002ec,
.hal_reo2_ring_base_lsb = 0x000002f4,
.hal_reo1_aging_thresh_ix_0 = 0x00000564,
.hal_reo1_aging_thresh_ix_1 = 0x00000568,
.hal_reo1_aging_thresh_ix_2 = 0x0000056c,
.hal_reo1_aging_thresh_ix_3 = 0x00000570,
/* REO2SW(x) R2 ring pointers (head/tail) address */
.hal_reo1_ring_hp = 0x00003038,
.hal_reo1_ring_tp = 0x0000303c,
.hal_reo2_ring_hp = 0x00003040,
/* REO2TCL R0 ring configuration address */
.hal_reo_tcl_ring_base_lsb = 0x000003fc,
.hal_reo_tcl_ring_hp = 0x00003058,
/* REO CMD ring address */
.hal_reo_cmd_ring_base_lsb = 0x00000194,
.hal_reo_cmd_ring_hp = 0x00003020,
/* REO status address */
.hal_reo_status_ring_base_lsb = 0x00000504,
.hal_reo_status_hp = 0x00003070,
/* SW2REO ring address */
.hal_sw2reo_ring_base_lsb = 0x000001ec,
.hal_sw2reo_ring_hp = 0x00003028,
/* WCSS relative address */
.hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
.hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
.hal_seq_wcss_umac_ce1_src_reg = 0x01b82000,
.hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000,
/* WBM Idle address */
.hal_wbm_idle_link_ring_base_lsb = 0x00000874,
.hal_wbm_idle_link_ring_misc = 0x00000884,
/* SW2WBM release address */
.hal_wbm_release_ring_base_lsb = 0x000001ec,
/* WBM2SW release address */
.hal_wbm0_release_ring_base_lsb = 0x00000924,
.hal_wbm1_release_ring_base_lsb = 0x0000097c,
/* PCIe base address */
.pcie_qserdes_sysclk_en_sel = 0x01e0e0a8,
.pcie_pcs_osc_dtct_config_base = 0x01e0f45c,
/* Shadow register area */
.hal_shadow_base_addr = 0x0,
/* REO misc control register, not used in QCN9074 */
.hal_reo1_misc_ctl = 0x0,
};
const struct ath11k_hw_regs wcn6855_regs = {
/* SW2TCL(x) R0 ring configuration address */
.hal_tcl1_ring_base_lsb = 0x00000690,
.hal_tcl1_ring_base_msb = 0x00000694,
.hal_tcl1_ring_id = 0x00000698,
.hal_tcl1_ring_misc = 0x000006a0,
.hal_tcl1_ring_tp_addr_lsb = 0x000006ac,
.hal_tcl1_ring_tp_addr_msb = 0x000006b0,
.hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c0,
.hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c4,
.hal_tcl1_ring_msi1_base_lsb = 0x000006d8,
.hal_tcl1_ring_msi1_base_msb = 0x000006dc,
.hal_tcl1_ring_msi1_data = 0x000006e0,
.hal_tcl2_ring_base_lsb = 0x000006e8,
.hal_tcl_ring_base_lsb = 0x00000798,
/* TCL STATUS ring address */
.hal_tcl_status_ring_base_lsb = 0x000008a0,
/* REO2SW(x) R0 ring configuration address */
.hal_reo1_ring_base_lsb = 0x00000244,
.hal_reo1_ring_base_msb = 0x00000248,
.hal_reo1_ring_id = 0x0000024c,
.hal_reo1_ring_misc = 0x00000254,
.hal_reo1_ring_hp_addr_lsb = 0x00000258,
.hal_reo1_ring_hp_addr_msb = 0x0000025c,
.hal_reo1_ring_producer_int_setup = 0x00000268,
.hal_reo1_ring_msi1_base_lsb = 0x0000028c,
.hal_reo1_ring_msi1_base_msb = 0x00000290,
.hal_reo1_ring_msi1_data = 0x00000294,
.hal_reo2_ring_base_lsb = 0x0000029c,
.hal_reo1_aging_thresh_ix_0 = 0x000005bc,
.hal_reo1_aging_thresh_ix_1 = 0x000005c0,
.hal_reo1_aging_thresh_ix_2 = 0x000005c4,
.hal_reo1_aging_thresh_ix_3 = 0x000005c8,
/* REO2SW(x) R2 ring pointers (head/tail) address */
.hal_reo1_ring_hp = 0x00003030,
.hal_reo1_ring_tp = 0x00003034,
.hal_reo2_ring_hp = 0x00003038,
/* REO2TCL R0 ring configuration address */
.hal_reo_tcl_ring_base_lsb = 0x00000454,
.hal_reo_tcl_ring_hp = 0x00003060,
/* REO CMD ring address */
.hal_reo_cmd_ring_base_lsb = 0x00000194,
.hal_reo_cmd_ring_hp = 0x00003020,
/* REO status address */
.hal_reo_status_ring_base_lsb = 0x0000055c,
.hal_reo_status_hp = 0x00003078,
/* SW2REO ring address */
.hal_sw2reo_ring_base_lsb = 0x000001ec,
.hal_sw2reo_ring_hp = 0x00003028,
/* WCSS relative address */
.hal_seq_wcss_umac_ce0_src_reg = 0x1b80000,
.hal_seq_wcss_umac_ce0_dst_reg = 0x1b81000,
.hal_seq_wcss_umac_ce1_src_reg = 0x1b82000,
.hal_seq_wcss_umac_ce1_dst_reg = 0x1b83000,
/* WBM Idle address */
.hal_wbm_idle_link_ring_base_lsb = 0x00000870,
.hal_wbm_idle_link_ring_misc = 0x00000880,
/* SW2WBM release address */
.hal_wbm_release_ring_base_lsb = 0x000001e8,
/* WBM2SW release address */
.hal_wbm0_release_ring_base_lsb = 0x00000920,
.hal_wbm1_release_ring_base_lsb = 0x00000978,
/* PCIe base address */
.pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
.pcie_pcs_osc_dtct_config_base = 0x01e0c628,
/* Shadow register area */
.hal_shadow_base_addr = 0x000008fc,
/* REO misc control register, used for fragment
* destination ring config in WCN6855.
*/
.hal_reo1_misc_ctl = 0x00000630,
};
const struct ath11k_hw_regs wcn6750_regs = {
/* SW2TCL(x) R0 ring configuration address */
.hal_tcl1_ring_base_lsb = 0x00000694,
.hal_tcl1_ring_base_msb = 0x00000698,
.hal_tcl1_ring_id = 0x0000069c,
.hal_tcl1_ring_misc = 0x000006a4,
.hal_tcl1_ring_tp_addr_lsb = 0x000006b0,
.hal_tcl1_ring_tp_addr_msb = 0x000006b4,
.hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c4,
.hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c8,
.hal_tcl1_ring_msi1_base_lsb = 0x000006dc,
.hal_tcl1_ring_msi1_base_msb = 0x000006e0,
.hal_tcl1_ring_msi1_data = 0x000006e4,
.hal_tcl2_ring_base_lsb = 0x000006ec,
.hal_tcl_ring_base_lsb = 0x0000079c,
/* TCL STATUS ring address */
.hal_tcl_status_ring_base_lsb = 0x000008a4,
/* REO2SW(x) R0 ring configuration address */
.hal_reo1_ring_base_lsb = 0x000001ec,
.hal_reo1_ring_base_msb = 0x000001f0,
.hal_reo1_ring_id = 0x000001f4,
.hal_reo1_ring_misc = 0x000001fc,
.hal_reo1_ring_hp_addr_lsb = 0x00000200,
.hal_reo1_ring_hp_addr_msb = 0x00000204,
.hal_reo1_ring_producer_int_setup = 0x00000210,
.hal_reo1_ring_msi1_base_lsb = 0x00000234,
.hal_reo1_ring_msi1_base_msb = 0x00000238,
.hal_reo1_ring_msi1_data = 0x0000023c,
.hal_reo2_ring_base_lsb = 0x00000244,
.hal_reo1_aging_thresh_ix_0 = 0x00000564,
.hal_reo1_aging_thresh_ix_1 = 0x00000568,
.hal_reo1_aging_thresh_ix_2 = 0x0000056c,
.hal_reo1_aging_thresh_ix_3 = 0x00000570,
/* REO2SW(x) R2 ring pointers (head/tail) address */
.hal_reo1_ring_hp = 0x00003028,
.hal_reo1_ring_tp = 0x0000302c,
.hal_reo2_ring_hp = 0x00003030,
/* REO2TCL R0 ring configuration address */
.hal_reo_tcl_ring_base_lsb = 0x000003fc,
.hal_reo_tcl_ring_hp = 0x00003058,
/* REO CMD ring address */
.hal_reo_cmd_ring_base_lsb = 0x000000e4,
.hal_reo_cmd_ring_hp = 0x00003010,
/* REO status address */
.hal_reo_status_ring_base_lsb = 0x00000504,
.hal_reo_status_hp = 0x00003070,
/* SW2REO ring address */
.hal_sw2reo_ring_base_lsb = 0x0000013c,
.hal_sw2reo_ring_hp = 0x00003018,
/* WCSS relative address */
.hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
.hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
.hal_seq_wcss_umac_ce1_src_reg = 0x01b82000,
.hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000,
/* WBM Idle address */
.hal_wbm_idle_link_ring_base_lsb = 0x00000874,
.hal_wbm_idle_link_ring_misc = 0x00000884,
/* SW2WBM release address */
.hal_wbm_release_ring_base_lsb = 0x000001ec,
/* WBM2SW release address */
.hal_wbm0_release_ring_base_lsb = 0x00000924,
.hal_wbm1_release_ring_base_lsb = 0x0000097c,
/* PCIe base address */
.pcie_qserdes_sysclk_en_sel = 0x0,
.pcie_pcs_osc_dtct_config_base = 0x0,
/* Shadow register area */
.hal_shadow_base_addr = 0x00000504,
/* REO misc control register, used for fragment
* destination ring config in WCN6750.
*/
.hal_reo1_misc_ctl = 0x000005d8,
};
static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_ipq8074[] = {
{
.tcl_ring_num = 0,
.wbm_ring_num = 0,
.rbm_id = HAL_RX_BUF_RBM_SW0_BM,
},
{
.tcl_ring_num = 1,
.wbm_ring_num = 1,
.rbm_id = HAL_RX_BUF_RBM_SW1_BM,
},
{
.tcl_ring_num = 2,
.wbm_ring_num = 2,
.rbm_id = HAL_RX_BUF_RBM_SW2_BM,
},
};
static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_wcn6750[] = {
{
.tcl_ring_num = 0,
.wbm_ring_num = 0,
.rbm_id = HAL_RX_BUF_RBM_SW0_BM,
},
{
.tcl_ring_num = 1,
.wbm_ring_num = 4,
.rbm_id = HAL_RX_BUF_RBM_SW4_BM,
},
{
.tcl_ring_num = 2,
.wbm_ring_num = 2,
.rbm_id = HAL_RX_BUF_RBM_SW2_BM,
},
};
const struct ath11k_hw_regs ipq5018_regs = {
/* SW2TCL(x) R0 ring configuration address */
.hal_tcl1_ring_base_lsb = 0x00000694,
.hal_tcl1_ring_base_msb = 0x00000698,
.hal_tcl1_ring_id = 0x0000069c,
.hal_tcl1_ring_misc = 0x000006a4,
.hal_tcl1_ring_tp_addr_lsb = 0x000006b0,
.hal_tcl1_ring_tp_addr_msb = 0x000006b4,
.hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c4,
.hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c8,
.hal_tcl1_ring_msi1_base_lsb = 0x000006dc,
.hal_tcl1_ring_msi1_base_msb = 0x000006e0,
.hal_tcl1_ring_msi1_data = 0x000006e4,
.hal_tcl2_ring_base_lsb = 0x000006ec,
.hal_tcl_ring_base_lsb = 0x0000079c,
/* TCL STATUS ring address */
.hal_tcl_status_ring_base_lsb = 0x000008a4,
/* REO2SW(x) R0 ring configuration address */
.hal_reo1_ring_base_lsb = 0x000001ec,
.hal_reo1_ring_base_msb = 0x000001f0,
.hal_reo1_ring_id = 0x000001f4,
.hal_reo1_ring_misc = 0x000001fc,
.hal_reo1_ring_hp_addr_lsb = 0x00000200,
.hal_reo1_ring_hp_addr_msb = 0x00000204,
.hal_reo1_ring_producer_int_setup = 0x00000210,
.hal_reo1_ring_msi1_base_lsb = 0x00000234,
.hal_reo1_ring_msi1_base_msb = 0x00000238,
.hal_reo1_ring_msi1_data = 0x0000023c,
.hal_reo2_ring_base_lsb = 0x00000244,
.hal_reo1_aging_thresh_ix_0 = 0x00000564,
.hal_reo1_aging_thresh_ix_1 = 0x00000568,
.hal_reo1_aging_thresh_ix_2 = 0x0000056c,
.hal_reo1_aging_thresh_ix_3 = 0x00000570,
/* REO2SW(x) R2 ring pointers (head/tail) address */
.hal_reo1_ring_hp = 0x00003028,
.hal_reo1_ring_tp = 0x0000302c,
.hal_reo2_ring_hp = 0x00003030,
/* REO2TCL R0 ring configuration address */
.hal_reo_tcl_ring_base_lsb = 0x000003fc,
.hal_reo_tcl_ring_hp = 0x00003058,
/* SW2REO ring address */
.hal_sw2reo_ring_base_lsb = 0x0000013c,
.hal_sw2reo_ring_hp = 0x00003018,
/* REO CMD ring address */
.hal_reo_cmd_ring_base_lsb = 0x000000e4,
.hal_reo_cmd_ring_hp = 0x00003010,
/* REO status address */
.hal_reo_status_ring_base_lsb = 0x00000504,
.hal_reo_status_hp = 0x00003070,
/* WCSS relative address */
.hal_seq_wcss_umac_ce0_src_reg = 0x08400000
- HAL_IPQ5018_CE_WFSS_REG_BASE,
.hal_seq_wcss_umac_ce0_dst_reg = 0x08401000
- HAL_IPQ5018_CE_WFSS_REG_BASE,
.hal_seq_wcss_umac_ce1_src_reg = 0x08402000
- HAL_IPQ5018_CE_WFSS_REG_BASE,
.hal_seq_wcss_umac_ce1_dst_reg = 0x08403000
- HAL_IPQ5018_CE_WFSS_REG_BASE,
/* WBM Idle address */
.hal_wbm_idle_link_ring_base_lsb = 0x00000874,
.hal_wbm_idle_link_ring_misc = 0x00000884,
/* SW2WBM release address */
.hal_wbm_release_ring_base_lsb = 0x000001ec,
/* WBM2SW release address */
.hal_wbm0_release_ring_base_lsb = 0x00000924,
.hal_wbm1_release_ring_base_lsb = 0x0000097c,
};
const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
.tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
};
const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
.tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
};
const struct ath11k_hw_hal_params ath11k_hw_hal_params_wcn6750 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
.tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_wcn6750,
};
static const struct cfg80211_sar_freq_ranges ath11k_hw_sar_freq_ranges_wcn6855[] = {
{.start_freq = 2402, .end_freq = 2482 }, /* 2G ch1~ch13 */
{.start_freq = 5150, .end_freq = 5250 }, /* 5G UNII-1 ch32~ch48 */
{.start_freq = 5250, .end_freq = 5725 }, /* 5G UNII-2 ch50~ch144 */
{.start_freq = 5725, .end_freq = 5810 }, /* 5G UNII-3 ch149~ch161 */
{.start_freq = 5815, .end_freq = 5895 }, /* 5G UNII-4 ch163~ch177 */
{.start_freq = 5925, .end_freq = 6165 }, /* 6G UNII-5 Ch1, Ch2 ~ Ch41 */
{.start_freq = 6165, .end_freq = 6425 }, /* 6G UNII-5 ch45~ch93 */
{.start_freq = 6425, .end_freq = 6525 }, /* 6G UNII-6 ch97~ch113 */
{.start_freq = 6525, .end_freq = 6705 }, /* 6G UNII-7 ch117~ch149 */
{.start_freq = 6705, .end_freq = 6875 }, /* 6G UNII-7 ch153~ch185 */
{.start_freq = 6875, .end_freq = 7125 }, /* 6G UNII-8 ch189~ch233 */
};
const struct cfg80211_sar_capa ath11k_hw_sar_capa_wcn6855 = {
.type = NL80211_SAR_TYPE_POWER,
.num_freq_ranges = (ARRAY_SIZE(ath11k_hw_sar_freq_ranges_wcn6855)),
.freq_ranges = ath11k_hw_sar_freq_ranges_wcn6855,
};
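/* Note on the SAR capability above: with NL80211_SAR_TYPE_POWER, userspace
 * supplies one power limit per index of freq_ranges[], so index 0 bounds the
 * 2.4 GHz band and index 10 the upper 6 GHz block.
 */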
| linux-master | drivers/net/wireless/ath/ath11k/hw.c |
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021, 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
#include <linux/of.h>
#include "core.h"
#include "debug.h"
#include "mac.h"
#include "hw.h"
#include "peer.h"
#include "testmode.h"
struct wmi_tlv_policy {
size_t min_len;
};
struct wmi_tlv_svc_ready_parse {
bool wmi_svc_bitmap_done;
};
struct wmi_tlv_dma_ring_caps_parse {
struct wmi_dma_ring_capabilities *dma_ring_caps;
u32 n_dma_ring_caps;
};
struct wmi_tlv_svc_rdy_ext_parse {
struct ath11k_service_ext_param param;
struct wmi_soc_mac_phy_hw_mode_caps *hw_caps;
struct wmi_hw_mode_capabilities *hw_mode_caps;
u32 n_hw_mode_caps;
u32 tot_phy_id;
struct wmi_hw_mode_capabilities pref_hw_mode_caps;
struct wmi_mac_phy_capabilities *mac_phy_caps;
u32 n_mac_phy_caps;
struct wmi_soc_hal_reg_capabilities *soc_hal_reg_caps;
struct wmi_hal_reg_capabilities_ext *ext_hal_reg_caps;
u32 n_ext_hal_reg_caps;
struct wmi_tlv_dma_ring_caps_parse dma_caps_parse;
bool hw_mode_done;
bool mac_phy_done;
bool ext_hal_reg_done;
bool mac_phy_chainmask_combo_done;
bool mac_phy_chainmask_cap_done;
bool oem_dma_ring_cap_done;
bool dma_ring_cap_done;
};
struct wmi_tlv_svc_rdy_ext2_parse {
struct wmi_tlv_dma_ring_caps_parse dma_caps_parse;
bool dma_ring_cap_done;
};
struct wmi_tlv_rdy_parse {
u32 num_extra_mac_addr;
};
struct wmi_tlv_dma_buf_release_parse {
struct ath11k_wmi_dma_buf_release_fixed_param fixed;
struct wmi_dma_buf_release_entry *buf_entry;
struct wmi_dma_buf_release_meta_data *meta_data;
u32 num_buf_entry;
u32 num_meta;
bool buf_entry_done;
bool meta_data_done;
};
struct wmi_tlv_fw_stats_parse {
const struct wmi_stats_event *ev;
const struct wmi_per_chain_rssi_stats *rssi;
struct ath11k_fw_stats *stats;
int rssi_num;
bool chain_rssi_done;
};
struct wmi_tlv_mgmt_rx_parse {
const struct wmi_mgmt_rx_hdr *fixed;
const u8 *frame_buf;
bool frame_buf_done;
};
static const struct wmi_tlv_policy wmi_tlv_policies[] = {
[WMI_TAG_ARRAY_BYTE]
= { .min_len = 0 },
[WMI_TAG_ARRAY_UINT32]
= { .min_len = 0 },
[WMI_TAG_SERVICE_READY_EVENT]
= { .min_len = sizeof(struct wmi_service_ready_event) },
[WMI_TAG_SERVICE_READY_EXT_EVENT]
= { .min_len = sizeof(struct wmi_service_ready_ext_event) },
[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS]
= { .min_len = sizeof(struct wmi_soc_mac_phy_hw_mode_caps) },
[WMI_TAG_SOC_HAL_REG_CAPABILITIES]
= { .min_len = sizeof(struct wmi_soc_hal_reg_capabilities) },
[WMI_TAG_VDEV_START_RESPONSE_EVENT]
= { .min_len = sizeof(struct wmi_vdev_start_resp_event) },
[WMI_TAG_PEER_DELETE_RESP_EVENT]
= { .min_len = sizeof(struct wmi_peer_delete_resp_event) },
[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT]
= { .min_len = sizeof(struct wmi_bcn_tx_status_event) },
[WMI_TAG_VDEV_STOPPED_EVENT]
= { .min_len = sizeof(struct wmi_vdev_stopped_event) },
[WMI_TAG_REG_CHAN_LIST_CC_EVENT]
= { .min_len = sizeof(struct wmi_reg_chan_list_cc_event) },
[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT]
= { .min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
[WMI_TAG_MGMT_RX_HDR]
= { .min_len = sizeof(struct wmi_mgmt_rx_hdr) },
[WMI_TAG_MGMT_TX_COMPL_EVENT]
= { .min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
[WMI_TAG_SCAN_EVENT]
= { .min_len = sizeof(struct wmi_scan_event) },
[WMI_TAG_PEER_STA_KICKOUT_EVENT]
= { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
[WMI_TAG_ROAM_EVENT]
= { .min_len = sizeof(struct wmi_roam_event) },
[WMI_TAG_CHAN_INFO_EVENT]
= { .min_len = sizeof(struct wmi_chan_info_event) },
[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT]
= { .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]
= { .min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
[WMI_TAG_READY_EVENT] = {
.min_len = sizeof(struct wmi_ready_event_min) },
[WMI_TAG_SERVICE_AVAILABLE_EVENT]
= {.min_len = sizeof(struct wmi_service_available_event) },
[WMI_TAG_PEER_ASSOC_CONF_EVENT]
= { .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
[WMI_TAG_STATS_EVENT]
= { .min_len = sizeof(struct wmi_stats_event) },
[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
= { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
[WMI_TAG_HOST_SWFDA_EVENT] = {
.min_len = sizeof(struct wmi_fils_discovery_event) },
[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
[WMI_TAG_OBSS_COLOR_COLLISION_EVT] = {
.min_len = sizeof(struct wmi_obss_color_collision_event) },
[WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
.min_len = sizeof(struct wmi_11d_new_cc_ev) },
[WMI_TAG_PER_CHAIN_RSSI_STATS] = {
.min_len = sizeof(struct wmi_per_chain_rssi_stats) },
[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = {
.min_len = sizeof(struct wmi_twt_add_dialog_event) },
};
#define PRIMAP(_hw_mode_) \
[_hw_mode_] = _hw_mode_##_PRI
static const int ath11k_hw_mode_pri_map[] = {
PRIMAP(WMI_HOST_HW_MODE_SINGLE),
PRIMAP(WMI_HOST_HW_MODE_DBS),
PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
PRIMAP(WMI_HOST_HW_MODE_SBS),
PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
/* keep last */
PRIMAP(WMI_HOST_HW_MODE_MAX),
};
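/* For example, PRIMAP(WMI_HOST_HW_MODE_DBS) expands to
 *   [WMI_HOST_HW_MODE_DBS] = WMI_HOST_HW_MODE_DBS_PRI
 * so the array maps each hardware mode to its priority constant.
 */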
static int
ath11k_wmi_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
int (*iter)(struct ath11k_base *ab, u16 tag, u16 len,
const void *ptr, void *data),
void *data)
{
const void *begin = ptr;
const struct wmi_tlv *tlv;
u16 tlv_tag, tlv_len;
int ret;
while (len > 0) {
if (len < sizeof(*tlv)) {
ath11k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
ptr - begin, len, sizeof(*tlv));
return -EINVAL;
}
tlv = ptr;
tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
tlv_len = FIELD_GET(WMI_TLV_LEN, tlv->header);
ptr += sizeof(*tlv);
len -= sizeof(*tlv);
if (tlv_len > len) {
ath11k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
tlv_tag, ptr - begin, len, tlv_len);
return -EINVAL;
}
if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
wmi_tlv_policies[tlv_tag].min_len &&
wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
ath11k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
tlv_tag, ptr - begin, tlv_len,
wmi_tlv_policies[tlv_tag].min_len);
return -EINVAL;
}
ret = iter(ab, tlv_tag, tlv_len, ptr, data);
if (ret)
return ret;
ptr += tlv_len;
len -= tlv_len;
}
return 0;
}
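/* The stream walked above is a packed sequence of
 *   [32-bit header: tag | len][len bytes of value] ...
 * where tag and len are bit-fields of tlv->header; each value pointer is
 * handed to 'iter' only after the per-tag min_len policy check passes.
 */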
static int ath11k_wmi_tlv_iter_parse(struct ath11k_base *ab, u16 tag, u16 len,
const void *ptr, void *data)
{
const void **tb = data;
if (tag < WMI_TAG_MAX)
tb[tag] = ptr;
return 0;
}
static int ath11k_wmi_tlv_parse(struct ath11k_base *ar, const void **tb,
const void *ptr, size_t len)
{
return ath11k_wmi_tlv_iter(ar, ptr, len, ath11k_wmi_tlv_iter_parse,
(void *)tb);
}
const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr,
size_t len, gfp_t gfp)
{
const void **tb;
int ret;
tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
if (!tb)
return ERR_PTR(-ENOMEM);
ret = ath11k_wmi_tlv_parse(ab, tb, ptr, len);
if (ret) {
kfree(tb);
return ERR_PTR(ret);
}
return tb;
}
static int ath11k_wmi_cmd_send_nowait(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
u32 cmd_id)
{
struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_cmd_hdr *cmd_hdr;
int ret;
u32 cmd = 0;
if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
return -ENOMEM;
cmd |= FIELD_PREP(WMI_CMD_HDR_CMD_ID, cmd_id);
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
cmd_hdr->cmd_id = cmd;
trace_ath11k_wmi_cmd(ab, cmd_id, skb->data, skb->len);
memset(skb_cb, 0, sizeof(*skb_cb));
ret = ath11k_htc_send(&ab->htc, wmi->eid, skb);
if (ret)
goto err_pull;
return 0;
err_pull:
skb_pull(skb, sizeof(struct wmi_cmd_hdr));
return ret;
}
int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
u32 cmd_id)
{
struct ath11k_wmi_base *wmi_sc = wmi->wmi_ab;
int ret = -EOPNOTSUPP;
struct ath11k_base *ab = wmi_sc->ab;
might_sleep();
if (ab->hw_params.credit_flow) {
wait_event_timeout(wmi_sc->tx_credits_wq, ({
ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH,
&wmi_sc->ab->dev_flags))
ret = -ESHUTDOWN;
(ret != -EAGAIN);
}), WMI_SEND_TIMEOUT_HZ);
} else {
wait_event_timeout(wmi->tx_ce_desc_wq, ({
ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH,
&wmi_sc->ab->dev_flags))
ret = -ESHUTDOWN;
(ret != -ENOBUFS);
}), WMI_SEND_TIMEOUT_HZ);
}
if (ret == -EAGAIN)
ath11k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id);
if (ret == -ENOBUFS)
ath11k_warn(wmi_sc->ab, "ce desc not available for wmi command %d\n",
cmd_id);
return ret;
}
static int ath11k_pull_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
const void *ptr,
struct ath11k_service_ext_param *param)
{
const struct wmi_service_ready_ext_event *ev = ptr;
if (!ev)
return -EINVAL;
/* Move this to host based bitmap */
param->default_conc_scan_config_bits = ev->default_conc_scan_config_bits;
param->default_fw_config_bits = ev->default_fw_config_bits;
param->he_cap_info = ev->he_cap_info;
param->mpdu_density = ev->mpdu_density;
param->max_bssid_rx_filters = ev->max_bssid_rx_filters;
memcpy(&param->ppet, &ev->ppet, sizeof(param->ppet));
return 0;
}
static int
ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
struct wmi_soc_mac_phy_hw_mode_caps *hw_caps,
struct wmi_hw_mode_capabilities *wmi_hw_mode_caps,
struct wmi_soc_hal_reg_capabilities *hal_reg_caps,
struct wmi_mac_phy_capabilities *wmi_mac_phy_caps,
u8 hw_mode_id, u8 phy_id,
struct ath11k_pdev *pdev)
{
struct wmi_mac_phy_capabilities *mac_phy_caps;
struct ath11k_base *ab = wmi_handle->wmi_ab->ab;
struct ath11k_band_cap *cap_band;
struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
u32 phy_map;
u32 hw_idx, phy_idx = 0;
if (!hw_caps || !wmi_hw_mode_caps || !hal_reg_caps)
return -EINVAL;
for (hw_idx = 0; hw_idx < hw_caps->num_hw_modes; hw_idx++) {
if (hw_mode_id == wmi_hw_mode_caps[hw_idx].hw_mode_id)
break;
phy_map = wmi_hw_mode_caps[hw_idx].phy_id_map;
while (phy_map) {
phy_map >>= 1;
phy_idx++;
}
}
if (hw_idx == hw_caps->num_hw_modes)
return -EINVAL;
phy_idx += phy_id;
if (phy_id >= hal_reg_caps->num_phy)
return -EINVAL;
mac_phy_caps = wmi_mac_phy_caps + phy_idx;
pdev->pdev_id = mac_phy_caps->pdev_id;
pdev_cap->supported_bands |= mac_phy_caps->supported_bands;
pdev_cap->ampdu_density = mac_phy_caps->ampdu_density;
ab->target_pdev_ids[ab->target_pdev_count].supported_bands =
mac_phy_caps->supported_bands;
ab->target_pdev_ids[ab->target_pdev_count].pdev_id = mac_phy_caps->pdev_id;
ab->target_pdev_count++;
if (!(mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) &&
!(mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP))
return -EINVAL;
/* Take the non-zero tx/rx chainmask. If the tx/rx chainmask differs
 * from band to band for a single radio, it is not yet clear how that
 * case should be handled.
 */
if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g;
pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g;
}
if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g;
pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g;
pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g;
pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g;
pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g;
pdev_cap->nss_ratio_enabled =
WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio);
pdev_cap->nss_ratio_info =
WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio);
}
/* The tx/rx chainmask reported by firmware depends on the actual hardware
 * chains used. For example, on 4x4-capable macphys the first four chains can
 * be used for the first mac and the remaining four for the second mac, or
 * vice versa. In that case tx/rx chainmask 0xf is advertised for the first
 * mac and 0xf0 for the second, or vice versa. Compute the shift value for
 * the tx/rx chainmask, which is used to advertise supported ht/vht rates to
 * mac80211.
 */
pdev_cap->tx_chain_mask_shift =
find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
pdev_cap->rx_chain_mask_shift =
find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
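	/* e.g. a second-mac chainmask of 0xf0 yields shift 4, so mac80211 is
	 * advertised the usual 0xf worth of chains after (mask >> shift).
	 */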
if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
cap_band->phy_id = mac_phy_caps->phy_id;
cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g;
cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g;
cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g;
cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext;
cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g;
memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_2g,
sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g,
sizeof(struct ath11k_ppe_threshold));
}
if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
cap_band->phy_id = mac_phy_caps->phy_id;
cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
sizeof(struct ath11k_ppe_threshold));
cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
sizeof(struct ath11k_ppe_threshold));
}
return 0;
}
static int
ath11k_pull_reg_cap_svc_rdy_ext(struct ath11k_pdev_wmi *wmi_handle,
struct wmi_soc_hal_reg_capabilities *reg_caps,
struct wmi_hal_reg_capabilities_ext *wmi_ext_reg_cap,
u8 phy_idx,
struct ath11k_hal_reg_capabilities_ext *param)
{
struct wmi_hal_reg_capabilities_ext *ext_reg_cap;
if (!reg_caps || !wmi_ext_reg_cap)
return -EINVAL;
if (phy_idx >= reg_caps->num_phy)
return -EINVAL;
ext_reg_cap = &wmi_ext_reg_cap[phy_idx];
param->phy_id = ext_reg_cap->phy_id;
param->eeprom_reg_domain = ext_reg_cap->eeprom_reg_domain;
param->eeprom_reg_domain_ext =
ext_reg_cap->eeprom_reg_domain_ext;
param->regcap1 = ext_reg_cap->regcap1;
param->regcap2 = ext_reg_cap->regcap2;
/* check if param->wireless_mode is needed */
param->low_2ghz_chan = ext_reg_cap->low_2ghz_chan;
param->high_2ghz_chan = ext_reg_cap->high_2ghz_chan;
param->low_5ghz_chan = ext_reg_cap->low_5ghz_chan;
param->high_5ghz_chan = ext_reg_cap->high_5ghz_chan;
return 0;
}
static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab,
const void *evt_buf,
struct ath11k_targ_cap *cap)
{
const struct wmi_service_ready_event *ev = evt_buf;
if (!ev) {
ath11k_err(ab, "%s: failed by NULL param\n",
__func__);
return -EINVAL;
}
cap->phy_capability = ev->phy_capability;
cap->max_frag_entry = ev->max_frag_entry;
cap->num_rf_chains = ev->num_rf_chains;
cap->ht_cap_info = ev->ht_cap_info;
cap->vht_cap_info = ev->vht_cap_info;
cap->vht_supp_mcs = ev->vht_supp_mcs;
cap->hw_min_tx_power = ev->hw_min_tx_power;
cap->hw_max_tx_power = ev->hw_max_tx_power;
cap->sys_cap_info = ev->sys_cap_info;
cap->min_pkt_size_enable = ev->min_pkt_size_enable;
cap->max_bcn_ie_size = ev->max_bcn_ie_size;
cap->max_num_scan_channels = ev->max_num_scan_channels;
cap->max_supported_macs = ev->max_supported_macs;
cap->wmi_fw_sub_feat_caps = ev->wmi_fw_sub_feat_caps;
cap->txrx_chainmask = ev->txrx_chainmask;
cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index;
cap->num_msdu_desc = ev->num_msdu_desc;
return 0;
}
/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
* wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
* 4-byte word.
*/
static void ath11k_wmi_service_bitmap_copy(struct ath11k_pdev_wmi *wmi,
const u32 *wmi_svc_bm)
{
int i, j;
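/* Worked example, assuming WMI_SERVICE_BITS_IN_SIZE32 == 4 as described
 * above: word 0 bits b0-b3 map to services 0-3, word 1 bits b0-b3 map
 * to services 4-7, and so on; the upper bits of each word are ignored.
 */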
for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
do {
if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
set_bit(j, wmi->wmi_ab->svc_map);
} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
}
}
static int ath11k_wmi_tlv_svc_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_svc_ready_parse *svc_ready = data;
struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0];
u16 expect_len;
switch (tag) {
case WMI_TAG_SERVICE_READY_EVENT:
if (ath11k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
return -EINVAL;
break;
case WMI_TAG_ARRAY_UINT32:
if (!svc_ready->wmi_svc_bitmap_done) {
expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
if (len < expect_len) {
ath11k_warn(ab, "invalid len %d for the tag 0x%x\n",
len, tag);
return -EINVAL;
}
ath11k_wmi_service_bitmap_copy(wmi_handle, ptr);
svc_ready->wmi_svc_bitmap_done = true;
}
break;
default:
break;
}
return 0;
}
static int ath11k_service_ready_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_tlv_svc_ready_parse svc_ready = { };
int ret;
ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
ath11k_wmi_tlv_svc_rdy_parse,
&svc_ready);
if (ret) {
ath11k_warn(ab, "failed to parse tlv %d\n", ret);
return ret;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "event service ready");
return 0;
}
struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len)
{
struct sk_buff *skb;
struct ath11k_base *ab = wmi_sc->ab;
u32 round_len = roundup(len, 4);
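/* WMI TLV buffers are expected to be 4-byte aligned: round the length
 * up and reserve HTC headroom so that skb->data (checked below) lands
 * on an aligned command buffer.
 */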
skb = ath11k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
if (!skb)
return NULL;
skb_reserve(skb, WMI_SKB_HEADROOM);
if (!IS_ALIGNED((unsigned long)skb->data, 4))
ath11k_warn(ab, "unaligned WMI skb data\n");
skb_put(skb, round_len);
memset(skb->data, 0, round_len);
return skb;
}
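/* Off-channel management frames queued during a remain-on-channel (RoC)
 * operation are pinned to the RoC frequency; all other frames return 0,
 * which presumably lets firmware transmit on the vdev's current channel.
 */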
static u32 ath11k_wmi_mgmt_get_freq(struct ath11k *ar,
struct ieee80211_tx_info *info)
{
struct ath11k_base *ab = ar->ab;
u32 freq = 0;
if (ab->hw_params.support_off_channel_tx &&
ar->scan.is_roc &&
(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
freq = ar->scan.roc_freq;
return freq;
}
int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
struct sk_buff *frame)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
struct wmi_mgmt_send_cmd *cmd;
struct wmi_tlv *frame_tlv;
struct sk_buff *skb;
u32 buf_len;
int ret, len;
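/* Only the first WMI_MGMT_SEND_DOWNLD_LEN bytes of the frame are copied
 * inline into the command; the full frame stays reachable by the target
 * through the DMA address (paddr) carried below.
 */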
buf_len = frame->len < WMI_MGMT_SEND_DOWNLD_LEN ?
frame->len : WMI_MGMT_SEND_DOWNLD_LEN;
len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_mgmt_send_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MGMT_TX_SEND_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->desc_id = buf_id;
cmd->chanfreq = ath11k_wmi_mgmt_get_freq(ar, info);
cmd->paddr_lo = lower_32_bits(ATH11K_SKB_CB(frame)->paddr);
cmd->paddr_hi = upper_32_bits(ATH11K_SKB_CB(frame)->paddr);
cmd->frame_len = frame->len;
cmd->buf_len = buf_len;
cmd->tx_params_valid = 0;
frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
frame_tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
FIELD_PREP(WMI_TLV_LEN, buf_len);
memcpy(frame_tlv->value, frame->data, buf_len);
ath11k_ce_byte_swap(frame_tlv->value, buf_len);
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd mgmt tx send");
return ret;
}
int ath11k_wmi_vdev_create(struct ath11k *ar, u8 *macaddr,
struct vdev_create_params *param)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_vdev_create_cmd *cmd;
struct sk_buff *skb;
struct wmi_vdev_txrx_streams *txrx_streams;
struct wmi_tlv *tlv;
int ret, len;
void *ptr;
/* It can be optimized by sending tx/rx chain configuration
 * only for supported bands instead of always sending it for
 * both the bands.
 */
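/* Message layout:
 * [wmi_vdev_create_cmd][tlv hdr][txrx_streams (2G)][txrx_streams (5G)]
 */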
len = sizeof(*cmd) + TLV_HDR_SIZE +
(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_vdev_create_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_CREATE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = param->if_id;
cmd->vdev_type = param->type;
cmd->vdev_subtype = param->subtype;
cmd->num_cfg_txrx_streams = WMI_NUM_SUPPORTED_BAND_MAX;
cmd->pdev_id = param->pdev_id;
cmd->mbssid_flags = param->mbssid_flags;
cmd->mbssid_tx_vdev_id = param->mbssid_tx_vdev_id;
ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
ptr = skb->data + sizeof(*cmd);
len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, len);
ptr += TLV_HDR_SIZE;
txrx_streams = ptr;
len = sizeof(*txrx_streams);
txrx_streams->tlv_header =
FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
txrx_streams->supported_tx_streams =
param->chains[NL80211_BAND_2GHZ].tx;
txrx_streams->supported_rx_streams =
param->chains[NL80211_BAND_2GHZ].rx;
txrx_streams++;
txrx_streams->tlv_header =
FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
txrx_streams->supported_tx_streams =
param->chains[NL80211_BAND_5GHZ].tx;
txrx_streams->supported_rx_streams =
param->chains[NL80211_BAND_5GHZ].rx;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to submit WMI_VDEV_CREATE_CMDID\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd vdev create id %d type %d subtype %d macaddr %pM pdevid %d\n",
param->if_id, param->type, param->subtype,
macaddr, param->pdev_id);
return ret;
}
int ath11k_wmi_vdev_delete(struct ath11k *ar, u8 vdev_id)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_vdev_delete_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_vdev_delete_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DELETE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev delete id %d\n", vdev_id);
return ret;
}
int ath11k_wmi_vdev_stop(struct ath11k *ar, u8 vdev_id)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_vdev_stop_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_vdev_stop_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_STOP_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev stop id 0x%x\n", vdev_id);
return ret;
}
int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_vdev_down_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_vdev_down_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DOWN_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev down id 0x%x\n", vdev_id);
return ret;
}
static void ath11k_wmi_put_wmi_channel(struct wmi_channel *chan,
struct wmi_vdev_start_req_arg *arg)
{
u32 center_freq1 = arg->channel.band_center_freq1;
memset(chan, 0, sizeof(*chan));
chan->mhz = arg->channel.freq;
chan->band_center_freq1 = arg->channel.band_center_freq1;
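/* For HE160 the firmware convention appears to be: band_center_freq1
 * carries the center of the 80 MHz segment containing the primary
 * channel and band_center_freq2 carries the 160 MHz center. E.g. for a
 * 160 MHz block centered at 5250 MHz with the primary at 5180 MHz,
 * freq1 becomes 5250 - 40 = 5210 MHz and freq2 stays 5250 MHz.
 */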
if (arg->channel.mode == MODE_11AX_HE160) {
if (arg->channel.freq > arg->channel.band_center_freq1)
chan->band_center_freq1 = center_freq1 + 40;
else
chan->band_center_freq1 = center_freq1 - 40;
chan->band_center_freq2 = arg->channel.band_center_freq1;
} else if ((arg->channel.mode == MODE_11AC_VHT80_80) ||
(arg->channel.mode == MODE_11AX_HE80_80)) {
chan->band_center_freq2 = arg->channel.band_center_freq2;
} else {
chan->band_center_freq2 = 0;
}
chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode);
if (arg->channel.passive)
chan->info |= WMI_CHAN_INFO_PASSIVE;
if (arg->channel.allow_ibss)
chan->info |= WMI_CHAN_INFO_ADHOC_ALLOWED;
if (arg->channel.allow_ht)
chan->info |= WMI_CHAN_INFO_ALLOW_HT;
if (arg->channel.allow_vht)
chan->info |= WMI_CHAN_INFO_ALLOW_VHT;
if (arg->channel.allow_he)
chan->info |= WMI_CHAN_INFO_ALLOW_HE;
if (arg->channel.ht40plus)
chan->info |= WMI_CHAN_INFO_HT40_PLUS;
if (arg->channel.chan_radar)
chan->info |= WMI_CHAN_INFO_DFS;
if (arg->channel.freq2_radar)
chan->info |= WMI_CHAN_INFO_DFS_FREQ2;
chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
arg->channel.max_power) |
FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
arg->channel.max_reg_power);
chan->reg_info_2 = FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
arg->channel.max_antenna_gain) |
FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
arg->channel.max_power);
}
int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg,
bool restart)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_vdev_start_request_cmd *cmd;
struct sk_buff *skb;
struct wmi_channel *chan;
struct wmi_tlv *tlv;
void *ptr;
int ret, len;
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
return -EINVAL;
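/* Message layout: [wmi_vdev_start_request_cmd][wmi_channel][tlv hdr
 * for the (empty) NoA descriptor array noted below]
 */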
len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_VDEV_START_REQUEST_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = arg->vdev_id;
cmd->beacon_interval = arg->bcn_intval;
cmd->bcn_tx_rate = arg->bcn_tx_rate;
cmd->dtim_period = arg->dtim_period;
cmd->num_noa_descriptors = arg->num_noa_descriptors;
cmd->preferred_rx_streams = arg->pref_rx_streams;
cmd->preferred_tx_streams = arg->pref_tx_streams;
cmd->cac_duration_ms = arg->cac_duration_ms;
cmd->regdomain = arg->regdomain;
cmd->he_ops = arg->he_ops;
cmd->mbssid_flags = arg->mbssid_flags;
cmd->mbssid_tx_vdev_id = arg->mbssid_tx_vdev_id;
if (!restart) {
if (arg->ssid) {
cmd->ssid.ssid_len = arg->ssid_len;
memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
}
if (arg->hidden_ssid)
cmd->flags |= WMI_VDEV_START_HIDDEN_SSID;
if (arg->pmf_enabled)
cmd->flags |= WMI_VDEV_START_PMF_ENABLED;
}
cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED;
if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
cmd->flags |= WMI_VDEV_START_HW_ENCRYPTION_DISABLED;
ptr = skb->data + sizeof(*cmd);
chan = ptr;
ath11k_wmi_put_wmi_channel(chan, arg);
chan->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) |
FIELD_PREP(WMI_TLV_LEN,
sizeof(*chan) - TLV_HDR_SIZE);
ptr += sizeof(*chan);
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, 0);
/* Note: This is a nested TLV containing:
* [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
*/
ptr += sizeof(*tlv);
if (restart)
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_VDEV_RESTART_REQUEST_CMDID);
else
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_VDEV_START_REQUEST_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
restart ? "restart" : "start");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev %s id 0x%x freq 0x%x mode 0x%x\n",
restart ? "restart" : "start", arg->vdev_id,
arg->channel.freq, arg->channel.mode);
return ret;
}
int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid,
u8 *tx_bssid, u32 nontx_profile_idx, u32 nontx_profile_cnt)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_vdev_up_cmd *cmd;
struct ieee80211_bss_conf *bss_conf;
struct ath11k_vif *arvif;
struct sk_buff *skb;
int ret;
arvif = ath11k_mac_get_arvif(ar, vdev_id);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_vdev_up_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_UP_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->vdev_assoc_id = aid;
ether_addr_copy(cmd->vdev_bssid.addr, bssid);
cmd->nontx_profile_idx = nontx_profile_idx;
cmd->nontx_profile_cnt = nontx_profile_cnt;
if (tx_bssid)
ether_addr_copy(cmd->tx_vdev_bssid.addr, tx_bssid);
if (arvif && arvif->vif->type == NL80211_IFTYPE_STATION) {
bss_conf = &arvif->vif->bss_conf;
if (bss_conf->nontransmitted) {
ether_addr_copy(cmd->tx_vdev_bssid.addr,
bss_conf->transmitter_bssid);
cmd->nontx_profile_idx = bss_conf->bssid_index;
cmd->nontx_profile_cnt = bss_conf->bssid_indicator;
}
}
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd vdev up id 0x%x assoc id %d bssid %pM\n",
vdev_id, aid, bssid);
return ret;
}
int ath11k_wmi_send_peer_create_cmd(struct ath11k *ar,
struct peer_create_params *param)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_peer_create_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_peer_create_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_CREATE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
ether_addr_copy(cmd->peer_macaddr.addr, param->peer_addr);
cmd->peer_type = param->peer_type;
cmd->vdev_id = param->vdev_id;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd peer create vdev_id %d peer_addr %pM\n",
param->vdev_id, param->peer_addr);
return ret;
}
int ath11k_wmi_send_peer_delete_cmd(struct ath11k *ar,
const u8 *peer_addr, u8 vdev_id)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_peer_delete_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_peer_delete_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_DELETE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
cmd->vdev_id = vdev_id;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd peer delete vdev_id %d peer_addr %pM\n",
vdev_id, peer_addr);
return ret;
}
int ath11k_wmi_send_pdev_set_regdomain(struct ath11k *ar,
struct pdev_set_regdomain_params *param)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_pdev_set_regdomain_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_PDEV_SET_REGDOMAIN_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->reg_domain = param->current_rd_in_use;
cmd->reg_domain_2g = param->current_rd_2g;
cmd->reg_domain_5g = param->current_rd_5g;
cmd->conformance_test_limit_2g = param->ctl_2g;
cmd->conformance_test_limit_5g = param->ctl_5g;
cmd->dfs_domain = param->dfs_domain;
cmd->pdev_id = param->pdev_id;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
param->current_rd_in_use, param->current_rd_2g,
param->current_rd_5g, param->dfs_domain, param->pdev_id);
return ret;
}
int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr,
u32 vdev_id, u32 param_id, u32 param_val)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_peer_set_param_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_peer_set_param_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_SET_PARAM_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
cmd->vdev_id = vdev_id;
cmd->param_id = param_id;
cmd->param_value = param_val;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd peer set param vdev %d peer 0x%pM set param %d value %d\n",
vdev_id, peer_addr, param_id, param_val);
return ret;
}
int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar,
u8 peer_addr[ETH_ALEN],
struct peer_flush_params *param)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_peer_flush_tids_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_FLUSH_TIDS_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
cmd->peer_tid_bitmap = param->peer_tid_bitmap;
cmd->vdev_id = param->vdev_id;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_PEER_FLUSH_TIDS cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd peer flush tids vdev_id %d peer_addr %pM tids %08x\n",
param->vdev_id, peer_addr, param->peer_tid_bitmap);
return ret;
}
int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k *ar,
int vdev_id, const u8 *addr,
dma_addr_t paddr, u8 tid,
u8 ba_window_size_valid,
u32 ba_window_size)
{
struct wmi_peer_reorder_queue_setup_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_REORDER_QUEUE_SETUP_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
ether_addr_copy(cmd->peer_macaddr.addr, addr);
cmd->vdev_id = vdev_id;
cmd->tid = tid;
cmd->queue_ptr_lo = lower_32_bits(paddr);
cmd->queue_ptr_hi = upper_32_bits(paddr);
cmd->queue_no = tid;
cmd->ba_window_size_valid = ba_window_size_valid;
cmd->ba_window_size = ba_window_size;
ret = ath11k_wmi_cmd_send(ar->wmi, skb,
WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd peer reorder queue setup addr %pM vdev_id %d tid %d\n",
addr, vdev_id, tid);
return ret;
}
int
ath11k_wmi_rx_reord_queue_remove(struct ath11k *ar,
struct rx_reorder_queue_remove_params *param)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_peer_reorder_queue_remove_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_REORDER_QUEUE_REMOVE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
ether_addr_copy(cmd->peer_macaddr.addr, param->peer_macaddr);
cmd->vdev_id = param->vdev_id;
cmd->tid_mask = param->peer_tid_bitmap;
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd peer reorder queue remove peer_macaddr %pM vdev_id %d tid_map %d",
param->peer_macaddr, param->vdev_id, param->peer_tid_bitmap);
return ret;
}
int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id,
u32 param_value, u8 pdev_id)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_pdev_set_param_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_PARAM_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->pdev_id = pdev_id;
cmd->param_id = param_id;
cmd->param_value = param_value;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd pdev set param %d pdev id %d value %d\n",
param_id, pdev_id, param_value);
return ret;
}
int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id,
enum wmi_sta_ps_mode psmode)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_pdev_set_ps_mode_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STA_POWERSAVE_MODE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->sta_ps_mode = psmode;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd sta powersave mode psmode %d vdev id %d\n",
psmode, vdev_id);
return ret;
}
int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt,
u32 pdev_id)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_pdev_suspend_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SUSPEND_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->suspend_opt = suspend_opt;
cmd->pdev_id = pdev_id;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd pdev suspend pdev_id %d\n", pdev_id);
return ret;
}
int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_pdev_resume_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_resume_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_RESUME_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->pdev_id = pdev_id;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd pdev resume pdev id %d\n", pdev_id);
return ret;
}
/* TODO: FW support for this cmd is not available yet.
 * It can be tested once the command and the corresponding
 * event are implemented in FW.
 */
int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar,
enum wmi_bss_chan_info_req_type type)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_pdev_bss_chan_info_req_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->req_type = type;
cmd->pdev_id = ar->pdev->pdev_id;
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd pdev bss chan info request type %d\n", type);
return ret;
}
int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k *ar, u8 *peer_addr,
struct ap_ps_params *param)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_ap_ps_peer_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_AP_PS_PEER_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = param->vdev_id;
ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
cmd->param = param->param;
cmd->value = param->value;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd ap ps peer param vdev id %d peer %pM param %d value %d\n",
param->vdev_id, peer_addr, param->param, param->value);
return ret;
}
int ath11k_wmi_set_sta_ps_param(struct ath11k *ar, u32 vdev_id,
u32 param, u32 param_value)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_sta_powersave_param_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_STA_POWERSAVE_PARAM_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->param = param;
cmd->value = param_value;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd set powersave param vdev_id %d param %d value %d\n",
vdev_id, param, param_value);
return ret;
}
int ath11k_wmi_force_fw_hang_cmd(struct ath11k *ar, u32 type, u32 delay_time_ms)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_force_fw_hang_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_FORCE_FW_HANG_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->type = type;
cmd->delay_time_ms = delay_time_ms;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
if (ret) {
ath11k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd force fw hang");
return ret;
}
int ath11k_wmi_vdev_set_param_cmd(struct ath11k *ar, u32 vdev_id,
u32 param_id, u32 param_value)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_vdev_set_param_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_PARAM_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->param_id = param_id;
cmd->param_value = param_value;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_VDEV_SET_PARAM_CMDID\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd vdev set param vdev 0x%x param %d value %d\n",
vdev_id, param_id, param_value);
return ret;
}
int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar,
struct stats_request_params *param)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_request_stats_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_request_stats_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_REQUEST_STATS_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->stats_id = param->stats_id;
cmd->vdev_id = param->vdev_id;
cmd->pdev_id = param->pdev_id;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd request stats 0x%x vdev id %d pdev id %d\n",
param->stats_id, param->vdev_id, param->pdev_id);
return ret;
}
int ath11k_wmi_send_pdev_temperature_cmd(struct ath11k *ar)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_get_pdev_temperature_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_GET_TEMPERATURE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->pdev_id = ar->pdev->pdev_id;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
return ret;
}
int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
u32 vdev_id, u32 bcn_ctrl_op)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_bcn_offload_ctrl_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_BCN_OFFLOAD_CTRL_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->bcn_ctrl_op = bcn_ctrl_op;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd bcn offload ctrl vdev id %d ctrl_op %d\n",
vdev_id, bcn_ctrl_op);
return ret;
}
int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
struct sk_buff *bcn, u32 ema_params)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_bcn_tmpl_cmd *cmd;
struct wmi_bcn_prb_info *bcn_prb_info;
struct wmi_tlv *tlv;
struct sk_buff *skb;
void *ptr;
int ret, len;
size_t aligned_len = roundup(bcn->len, 4);
struct ieee80211_vif *vif;
struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev_id);
if (!arvif) {
ath11k_warn(ar->ab, "failed to find arvif with vdev id %d\n", vdev_id);
return -EINVAL;
}
vif = arvif->vif;
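/* Message layout: [wmi_bcn_tmpl_cmd][wmi_bcn_prb_info][tlv hdr]
 * [beacon template, padded to a 4-byte multiple]
 */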
len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BCN_TMPL_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->tim_ie_offset = offs->tim_offset;
if (vif->bss_conf.csa_active) {
cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0];
cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1];
}
cmd->buf_len = bcn->len;
cmd->mbssid_ie_offset = offs->mbssid_off;
cmd->ema_params = ema_params;
ptr = skb->data + sizeof(*cmd);
bcn_prb_info = ptr;
len = sizeof(*bcn_prb_info);
bcn_prb_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_BCN_PRB_INFO) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
bcn_prb_info->caps = 0;
bcn_prb_info->erp = 0;
ptr += sizeof(*bcn_prb_info);
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
FIELD_PREP(WMI_TLV_LEN, aligned_len);
memcpy(tlv->value, bcn->data, bcn->len);
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd bcn tmpl");
return ret;
}
int ath11k_wmi_vdev_install_key(struct ath11k *ar,
struct wmi_vdev_install_key_arg *arg)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_vdev_install_key_cmd *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
int ret, len;
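/* Key material is carried as a byte-array TLV padded to a 4-byte
 * multiple; cmd->key_len below still reports the unpadded length.
 */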
int key_len_aligned = roundup(arg->key_len, sizeof(uint32_t));
len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_INSTALL_KEY_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = arg->vdev_id;
ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
cmd->key_idx = arg->key_idx;
cmd->key_flags = arg->key_flags;
cmd->key_cipher = arg->key_cipher;
cmd->key_len = arg->key_len;
cmd->key_txmic_len = arg->key_txmic_len;
cmd->key_rxmic_len = arg->key_rxmic_len;
if (arg->key_rsc_counter)
memcpy(&cmd->key_rsc_counter, &arg->key_rsc_counter,
sizeof(struct wmi_key_seq_counter));
tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
FIELD_PREP(WMI_TLV_LEN, key_len_aligned);
if (arg->key_data)
memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned);
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_VDEV_INSTALL_KEY cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd vdev install key idx %d cipher %d len %d\n",
arg->key_idx, arg->key_cipher, arg->key_len);
return ret;
}
static inline void
ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
struct peer_assoc_params *param,
bool hw_crypto_disabled)
{
cmd->peer_flags = 0;
if (param->is_wme_set) {
if (param->qos_flag)
cmd->peer_flags |= WMI_PEER_QOS;
if (param->apsd_flag)
cmd->peer_flags |= WMI_PEER_APSD;
if (param->ht_flag)
cmd->peer_flags |= WMI_PEER_HT;
if (param->bw_40)
cmd->peer_flags |= WMI_PEER_40MHZ;
if (param->bw_80)
cmd->peer_flags |= WMI_PEER_80MHZ;
if (param->bw_160)
cmd->peer_flags |= WMI_PEER_160MHZ;
/* Typically if STBC is enabled for VHT it should be enabled
 * for HT as well
 */
if (param->stbc_flag)
cmd->peer_flags |= WMI_PEER_STBC;
/* Typically if LDPC is enabled for VHT it should be enabled
 * for HT as well
 */
if (param->ldpc_flag)
cmd->peer_flags |= WMI_PEER_LDPC;
if (param->static_mimops_flag)
cmd->peer_flags |= WMI_PEER_STATIC_MIMOPS;
if (param->dynamic_mimops_flag)
cmd->peer_flags |= WMI_PEER_DYN_MIMOPS;
if (param->spatial_mux_flag)
cmd->peer_flags |= WMI_PEER_SPATIAL_MUX;
if (param->vht_flag)
cmd->peer_flags |= WMI_PEER_VHT;
if (param->he_flag)
cmd->peer_flags |= WMI_PEER_HE;
if (param->twt_requester)
cmd->peer_flags |= WMI_PEER_TWT_REQ;
if (param->twt_responder)
cmd->peer_flags |= WMI_PEER_TWT_RESP;
}
/* Suppress authorization for all AUTH modes that need 4-way handshake
* (during re-association).
* Authorization will be done for these modes on key installation.
*/
if (param->auth_flag)
cmd->peer_flags |= WMI_PEER_AUTH;
if (param->need_ptk_4_way) {
cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
if (!hw_crypto_disabled && param->is_assoc)
cmd->peer_flags &= ~WMI_PEER_AUTH;
}
if (param->need_gtk_2_way)
cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
/* safe mode bypass the 4-way handshake */
if (param->safe_mode_enabled)
cmd->peer_flags &= ~(WMI_PEER_NEED_PTK_4_WAY |
WMI_PEER_NEED_GTK_2_WAY);
if (param->is_pmf_enabled)
cmd->peer_flags |= WMI_PEER_PMF;
/* Disable AMSDU for station transmit, if user configures it.
 * Disable AMSDU for AP transmit to 11n stations, if user configures it:
 * if (param->amsdu_disable) - add once FW support is available.
 */
/* Target asserts if a node is marked HT while all of its MCS rates are
 * set to 0. Mark the node as non-HT if all the MCS rates are disabled
 * through iwpriv.
 */
if (param->peer_ht_rates.num_rates == 0)
cmd->peer_flags &= ~WMI_PEER_HT;
}
int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
struct peer_assoc_params *param)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_peer_assoc_complete_cmd *cmd;
struct wmi_vht_rate_set *mcs;
struct wmi_he_rate_set *he_mcs;
struct sk_buff *skb;
struct wmi_tlv *tlv;
void *ptr;
u32 peer_legacy_rates_align;
u32 peer_ht_rates_align;
int i, ret, len;
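/* Message layout, with both rate arrays padded to 4-byte multiples:
 * [cmd][byte array: legacy rates][byte array: HT rates]
 * [wmi_vht_rate_set][struct array: HE rate sets]
 */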
peer_legacy_rates_align = roundup(param->peer_legacy_rates.num_rates,
sizeof(u32));
peer_ht_rates_align = roundup(param->peer_ht_rates.num_rates,
sizeof(u32));
len = sizeof(*cmd) +
TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
sizeof(*mcs) + TLV_HDR_SIZE +
(sizeof(*he_mcs) * param->peer_he_mcs_count);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
ptr = skb->data;
cmd = ptr;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_PEER_ASSOC_COMPLETE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = param->vdev_id;
cmd->peer_new_assoc = param->peer_new_assoc;
cmd->peer_associd = param->peer_associd;
ath11k_wmi_copy_peer_flags(cmd, param,
test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED,
&ar->ab->dev_flags));
ether_addr_copy(cmd->peer_macaddr.addr, param->peer_mac);
cmd->peer_rate_caps = param->peer_rate_caps;
cmd->peer_caps = param->peer_caps;
cmd->peer_listen_intval = param->peer_listen_intval;
cmd->peer_ht_caps = param->peer_ht_caps;
cmd->peer_max_mpdu = param->peer_max_mpdu;
cmd->peer_mpdu_density = param->peer_mpdu_density;
cmd->peer_vht_caps = param->peer_vht_caps;
cmd->peer_phymode = param->peer_phymode;
/* Update 11ax capabilities */
cmd->peer_he_cap_info = param->peer_he_cap_macinfo[0];
cmd->peer_he_cap_info_ext = param->peer_he_cap_macinfo[1];
cmd->peer_he_cap_info_internal = param->peer_he_cap_macinfo_internal;
cmd->peer_he_caps_6ghz = param->peer_he_caps_6ghz;
cmd->peer_he_ops = param->peer_he_ops;
memcpy(&cmd->peer_he_cap_phy, ¶m->peer_he_cap_phyinfo,
sizeof(param->peer_he_cap_phyinfo));
memcpy(&cmd->peer_ppet, ¶m->peer_ppet,
sizeof(param->peer_ppet));
/* Update peer legacy rate information */
ptr += sizeof(*cmd);
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
FIELD_PREP(WMI_TLV_LEN, peer_legacy_rates_align);
ptr += TLV_HDR_SIZE;
cmd->num_peer_legacy_rates = param->peer_legacy_rates.num_rates;
memcpy(ptr, param->peer_legacy_rates.rates,
param->peer_legacy_rates.num_rates);
/* Update peer HT rate information */
ptr += peer_legacy_rates_align;
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
FIELD_PREP(WMI_TLV_LEN, peer_ht_rates_align);
ptr += TLV_HDR_SIZE;
cmd->num_peer_ht_rates = param->peer_ht_rates.num_rates;
memcpy(ptr, param->peer_ht_rates.rates,
param->peer_ht_rates.num_rates);
/* VHT Rates */
ptr += peer_ht_rates_align;
mcs = ptr;
mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VHT_RATE_SET) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*mcs) - TLV_HDR_SIZE);
cmd->peer_nss = param->peer_nss;
/* Update bandwidth-NSS mapping */
cmd->peer_bw_rxnss_override = 0;
cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override;
if (param->vht_capable) {
mcs->rx_max_rate = param->rx_max_rate;
mcs->rx_mcs_set = param->rx_mcs_set;
mcs->tx_max_rate = param->tx_max_rate;
mcs->tx_mcs_set = param->tx_mcs_set;
}
/* HE Rates */
cmd->peer_he_mcs = param->peer_he_mcs_count;
cmd->min_data_rate = param->min_data_rate;
ptr += sizeof(*mcs);
len = param->peer_he_mcs_count * sizeof(*he_mcs);
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, len);
ptr += TLV_HDR_SIZE;
/* Loop through the HE rate set */
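/* Note the swap below: the peer's Tx MCS map is written to rx_mcs_set
 * (and vice versa), presumably because what the peer can transmit is
 * what the local side will receive.
 */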
for (i = 0; i < param->peer_he_mcs_count; i++) {
he_mcs = ptr;
he_mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_HE_RATE_SET) |
FIELD_PREP(WMI_TLV_LEN,
sizeof(*he_mcs) - TLV_HDR_SIZE);
he_mcs->rx_mcs_set = param->peer_he_tx_mcs_set[i];
he_mcs->tx_mcs_set = param->peer_he_rx_mcs_set[i];
ptr += sizeof(*he_mcs);
}
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_PEER_ASSOC_CMDID\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n",
cmd->vdev_id, cmd->peer_associd, param->peer_mac,
cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
cmd->peer_listen_intval, cmd->peer_ht_caps,
cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
cmd->peer_mpdu_density,
cmd->peer_vht_caps, cmd->peer_he_cap_info,
cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
cmd->peer_he_cap_phy[2],
cmd->peer_bw_rxnss_override);
return ret;
}
void ath11k_wmi_start_scan_init(struct ath11k *ar,
struct scan_req_params *arg)
{
/* setup commonly used values */
arg->scan_req_id = 1;
if (ar->state_11d == ATH11K_11D_PREPARING)
arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
else
arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
arg->dwell_time_active = 50;
arg->dwell_time_active_2g = 0;
arg->dwell_time_passive = 150;
arg->dwell_time_active_6g = 40;
arg->dwell_time_passive_6g = 30;
arg->min_rest_time = 50;
arg->max_rest_time = 500;
arg->repeat_probe_time = 0;
arg->probe_spacing_time = 0;
arg->idle_time = 0;
arg->max_scan_time = 20000;
arg->probe_delay = 5;
arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
WMI_SCAN_EVENT_COMPLETED |
WMI_SCAN_EVENT_BSS_CHANNEL |
WMI_SCAN_EVENT_FOREIGN_CHAN |
WMI_SCAN_EVENT_DEQUEUED;
arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
if (test_bit(WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE,
ar->ab->wmi_ab.svc_map))
arg->scan_ctrl_flags_ext |=
WMI_SCAN_FLAG_EXT_PASSIVE_SCAN_START_TIME_ENHANCE;
arg->num_bssid = 1;
/* Fill bssid_list[0] with the broadcast address, otherwise the BSSID
 * and RA fields in the probe request will be all zeros.
 */
eth_broadcast_addr(arg->bssid_list[0].addr);
}
static inline void
ath11k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
struct scan_req_params *param)
{
/* Scan events subscription */
if (param->scan_ev_started)
cmd->notify_scan_events |= WMI_SCAN_EVENT_STARTED;
if (param->scan_ev_completed)
cmd->notify_scan_events |= WMI_SCAN_EVENT_COMPLETED;
if (param->scan_ev_bss_chan)
cmd->notify_scan_events |= WMI_SCAN_EVENT_BSS_CHANNEL;
if (param->scan_ev_foreign_chan)
cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN;
if (param->scan_ev_dequeued)
cmd->notify_scan_events |= WMI_SCAN_EVENT_DEQUEUED;
if (param->scan_ev_preempted)
cmd->notify_scan_events |= WMI_SCAN_EVENT_PREEMPTED;
if (param->scan_ev_start_failed)
cmd->notify_scan_events |= WMI_SCAN_EVENT_START_FAILED;
if (param->scan_ev_restarted)
cmd->notify_scan_events |= WMI_SCAN_EVENT_RESTARTED;
if (param->scan_ev_foreign_chn_exit)
cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT;
if (param->scan_ev_suspended)
cmd->notify_scan_events |= WMI_SCAN_EVENT_SUSPENDED;
if (param->scan_ev_resumed)
cmd->notify_scan_events |= WMI_SCAN_EVENT_RESUMED;
/* Set scan control flags */
cmd->scan_ctrl_flags = 0;
if (param->scan_f_passive)
cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
if (param->scan_f_strict_passive_pch)
cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN;
if (param->scan_f_promisc_mode)
cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROMISCUOS;
if (param->scan_f_capture_phy_err)
cmd->scan_ctrl_flags |= WMI_SCAN_CAPTURE_PHY_ERROR;
if (param->scan_f_half_rate)
cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_HALF_RATE_SUPPORT;
if (param->scan_f_quarter_rate)
cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT;
if (param->scan_f_cck_rates)
cmd->scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
if (param->scan_f_ofdm_rates)
cmd->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
if (param->scan_f_chan_stat_evnt)
cmd->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
if (param->scan_f_filter_prb_req)
cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
if (param->scan_f_bcast_probe)
cmd->scan_ctrl_flags |= WMI_SCAN_ADD_BCAST_PROBE_REQ;
if (param->scan_f_offchan_mgmt_tx)
cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_MGMT_TX;
if (param->scan_f_offchan_data_tx)
cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_DATA_TX;
if (param->scan_f_force_active_dfs_chn)
cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS;
if (param->scan_f_add_tpc_ie_in_probe)
cmd->scan_ctrl_flags |= WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ;
if (param->scan_f_add_ds_ie_in_probe)
cmd->scan_ctrl_flags |= WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ;
if (param->scan_f_add_spoofed_mac_in_probe)
cmd->scan_ctrl_flags |= WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ;
if (param->scan_f_add_rand_seq_in_probe)
cmd->scan_ctrl_flags |= WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ;
if (param->scan_f_en_ie_whitelist_in_probe)
cmd->scan_ctrl_flags |=
WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ;
/* for adaptive scan mode using 3 bits (21 - 23 bits) */
WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags,
param->adaptive_dwell_time_mode);
cmd->scan_ctrl_flags_ext = param->scan_ctrl_flags_ext;
}
int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
struct scan_req_params *params)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_start_scan_cmd *cmd;
struct wmi_ssid *ssid = NULL;
struct wmi_mac_addr *bssid;
struct sk_buff *skb;
struct wmi_tlv *tlv;
void *ptr;
int i, ret, len;
u32 *tmp_ptr;
u16 extraie_len_with_pad = 0;
struct hint_short_ssid *s_ssid = NULL;
struct hint_bssid *hint_bssid = NULL;
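/* Variable-length TLVs follow the fixed command in this order: channel
 * list (u32), SSID list, BSSID list, extra IEs, then the optional
 * short-SSID and BSSID hint arrays.
 */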
len = sizeof(*cmd);
len += TLV_HDR_SIZE;
if (params->num_chan)
len += params->num_chan * sizeof(u32);
len += TLV_HDR_SIZE;
if (params->num_ssids)
len += params->num_ssids * sizeof(*ssid);
len += TLV_HDR_SIZE;
if (params->num_bssid)
len += sizeof(*bssid) * params->num_bssid;
len += TLV_HDR_SIZE;
if (params->extraie.len && params->extraie.len <= 0xFFFF)
extraie_len_with_pad =
roundup(params->extraie.len, sizeof(u32));
len += extraie_len_with_pad;
if (params->num_hint_bssid)
len += TLV_HDR_SIZE +
params->num_hint_bssid * sizeof(struct hint_bssid);
if (params->num_hint_s_ssid)
len += TLV_HDR_SIZE +
params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
ptr = skb->data;
cmd = ptr;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_START_SCAN_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->scan_id = params->scan_id;
cmd->scan_req_id = params->scan_req_id;
cmd->vdev_id = params->vdev_id;
cmd->scan_priority = params->scan_priority;
cmd->notify_scan_events = params->notify_scan_events;
ath11k_wmi_copy_scan_event_cntrl_flags(cmd, params);
cmd->dwell_time_active = params->dwell_time_active;
cmd->dwell_time_active_2g = params->dwell_time_active_2g;
cmd->dwell_time_passive = params->dwell_time_passive;
cmd->dwell_time_active_6g = params->dwell_time_active_6g;
cmd->dwell_time_passive_6g = params->dwell_time_passive_6g;
cmd->min_rest_time = params->min_rest_time;
cmd->max_rest_time = params->max_rest_time;
cmd->repeat_probe_time = params->repeat_probe_time;
cmd->probe_spacing_time = params->probe_spacing_time;
cmd->idle_time = params->idle_time;
cmd->max_scan_time = params->max_scan_time;
cmd->probe_delay = params->probe_delay;
cmd->burst_duration = params->burst_duration;
cmd->num_chan = params->num_chan;
cmd->num_bssid = params->num_bssid;
cmd->num_ssids = params->num_ssids;
cmd->ie_len = params->extraie.len;
cmd->n_probes = params->n_probes;
ether_addr_copy(cmd->mac_addr.addr, params->mac_addr.addr);
ether_addr_copy(cmd->mac_mask.addr, params->mac_mask.addr);
ptr += sizeof(*cmd);
len = params->num_chan * sizeof(u32);
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
FIELD_PREP(WMI_TLV_LEN, len);
ptr += TLV_HDR_SIZE;
tmp_ptr = (u32 *)ptr;
for (i = 0; i < params->num_chan; ++i)
tmp_ptr[i] = params->chan_list[i];
ptr += len;
len = params->num_ssids * sizeof(*ssid);
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, len);
ptr += TLV_HDR_SIZE;
if (params->num_ssids) {
ssid = ptr;
for (i = 0; i < params->num_ssids; ++i) {
ssid->ssid_len = params->ssid[i].length;
memcpy(ssid->ssid, params->ssid[i].ssid,
params->ssid[i].length);
ssid++;
}
}
ptr += (params->num_ssids * sizeof(*ssid));
len = params->num_bssid * sizeof(*bssid);
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, len);
ptr += TLV_HDR_SIZE;
bssid = ptr;
if (params->num_bssid) {
for (i = 0; i < params->num_bssid; ++i) {
ether_addr_copy(bssid->addr,
params->bssid_list[i].addr);
bssid++;
}
}
ptr += params->num_bssid * sizeof(*bssid);
len = extraie_len_with_pad;
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
FIELD_PREP(WMI_TLV_LEN, len);
ptr += TLV_HDR_SIZE;
if (extraie_len_with_pad)
memcpy(ptr, params->extraie.ptr,
params->extraie.len);
ptr += extraie_len_with_pad;
if (params->num_hint_s_ssid) {
len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, len);
ptr += TLV_HDR_SIZE;
s_ssid = ptr;
for (i = 0; i < params->num_hint_s_ssid; ++i) {
s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags;
s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid;
s_ssid++;
}
ptr += len;
}
if (params->num_hint_bssid) {
len = params->num_hint_bssid * sizeof(struct hint_bssid);
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, len);
ptr += TLV_HDR_SIZE;
hint_bssid = ptr;
for (i = 0; i < params->num_hint_bssid; ++i) {
hint_bssid->freq_flags =
params->hint_bssid[i].freq_flags;
ether_addr_copy(&hint_bssid->bssid.addr[0],
&params->hint_bssid[i].bssid.addr[0]);
hint_bssid++;
}
}
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_START_SCAN_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd start scan");
return ret;
}
int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
struct scan_cancel_param *param)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_stop_scan_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_stop_scan_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STOP_SCAN_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = param->vdev_id;
cmd->requestor = param->requester;
cmd->scan_id = param->scan_id;
cmd->pdev_id = param->pdev_id;
/* stop the scan with the corresponding scan_id */
if (param->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
/* Cancelling all scans */
cmd->req_type = WMI_SCAN_STOP_ALL;
} else if (param->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
/* Cancelling VAP scans */
cmd->req_type = WMI_SCN_STOP_VAP_ALL;
} else if (param->req_type == WLAN_SCAN_CANCEL_SINGLE) {
/* Cancelling specific scan */
cmd->req_type = WMI_SCAN_STOP_ONE;
} else {
ath11k_warn(ar->ab, "invalid scan cancel param %d",
param->req_type);
dev_kfree_skb(skb);
return -EINVAL;
}
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_STOP_SCAN_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd stop scan");
return ret;
}
int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
struct scan_chan_list_params *chan_list)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_scan_chan_list_cmd *cmd;
struct sk_buff *skb;
struct wmi_channel *chan_info;
struct channel_param *tchan_info;
struct wmi_tlv *tlv;
void *ptr;
int i, ret, len;
u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
u32 *reg1, *reg2;
tchan_info = chan_list->ch_param;
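/* The channel list may exceed the maximum WMI message size, so send it
 * in chunks bounded by max_msg_len; every chunk after the first sets
 * WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG so the firmware appends to the
 * list instead of replacing it.
 */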
while (chan_list->nallchans) {
len = sizeof(*cmd) + TLV_HDR_SIZE;
max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
sizeof(*chan_info);
if (chan_list->nallchans > max_chan_limit)
num_send_chans = max_chan_limit;
else
num_send_chans = chan_list->nallchans;
chan_list->nallchans -= num_send_chans;
len += sizeof(*chan_info) * num_send_chans;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SCAN_CHAN_LIST_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->pdev_id = chan_list->pdev_id;
cmd->num_scan_chans = num_send_chans;
if (num_sends)
cmd->flags |= WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
num_send_chans, len, cmd->pdev_id, num_sends);
ptr = skb->data + sizeof(*cmd);
len = sizeof(*chan_info) * num_send_chans;
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
ptr += TLV_HDR_SIZE;
for (i = 0; i < num_send_chans; ++i) {
chan_info = ptr;
memset(chan_info, 0, sizeof(*chan_info));
len = sizeof(*chan_info);
chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_CHANNEL) |
FIELD_PREP(WMI_TLV_LEN,
len - TLV_HDR_SIZE);
reg1 = &chan_info->reg_info_1;
reg2 = &chan_info->reg_info_2;
chan_info->mhz = tchan_info->mhz;
chan_info->band_center_freq1 = tchan_info->cfreq1;
chan_info->band_center_freq2 = tchan_info->cfreq2;
if (tchan_info->is_chan_passive)
chan_info->info |= WMI_CHAN_INFO_PASSIVE;
if (tchan_info->allow_he)
chan_info->info |= WMI_CHAN_INFO_ALLOW_HE;
else if (tchan_info->allow_vht)
chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT;
else if (tchan_info->allow_ht)
chan_info->info |= WMI_CHAN_INFO_ALLOW_HT;
if (tchan_info->half_rate)
chan_info->info |= WMI_CHAN_INFO_HALF_RATE;
if (tchan_info->quarter_rate)
chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE;
if (tchan_info->psc_channel)
chan_info->info |= WMI_CHAN_INFO_PSC;
if (tchan_info->dfs_set)
chan_info->info |= WMI_CHAN_INFO_DFS;
chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE,
tchan_info->phy_mode);
*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR,
tchan_info->minpower);
*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
tchan_info->maxpower);
*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
tchan_info->maxregpower);
*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS,
tchan_info->reg_class_id);
*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
tchan_info->antennamax);
*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
tchan_info->maxregpower);
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"chan scan list chan[%d] = %u, chan_info->info %8x\n",
i, chan_info->mhz, chan_info->info);
ptr += sizeof(*chan_info);
tchan_info++;
}
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
dev_kfree_skb(skb);
return ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd scan chan list channels %d",
num_send_chans);
num_sends++;
}
return 0;
}
int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
struct wmi_wmm_params_all_arg *param)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_vdev_set_wmm_params_cmd *cmd;
struct wmi_wmm_params *wmm_param;
struct wmi_wmm_params_arg *wmi_wmm_arg;
struct sk_buff *skb;
int ret, ac;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->wmm_param_type = 0;
for (ac = 0; ac < WME_NUM_AC; ac++) {
switch (ac) {
case WME_AC_BE:
wmi_wmm_arg = &param->ac_be;
break;
case WME_AC_BK:
wmi_wmm_arg = &param->ac_bk;
break;
case WME_AC_VI:
wmi_wmm_arg = &param->ac_vi;
break;
case WME_AC_VO:
wmi_wmm_arg = &param->ac_vo;
break;
}
wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
wmm_param->tlv_header =
FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
FIELD_PREP(WMI_TLV_LEN,
sizeof(*wmm_param) - TLV_HDR_SIZE);
wmm_param->aifs = wmi_wmm_arg->aifs;
wmm_param->cwmin = wmi_wmm_arg->cwmin;
wmm_param->cwmax = wmi_wmm_arg->cwmax;
wmm_param->txoplimit = wmi_wmm_arg->txop;
wmm_param->acm = wmi_wmm_arg->acm;
wmm_param->no_ack = wmi_wmm_arg->no_ack;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
ac, wmm_param->aifs, wmm_param->cwmin,
wmm_param->cwmax, wmm_param->txoplimit,
wmm_param->acm, wmm_param->no_ack);
}
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_VDEV_SET_WMM_PARAMS_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev set wmm params");
return ret;
}
int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
u32 pdev_id)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_dfs_phyerr_offload_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->pdev_id = pdev_id;
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd pdev dfs phyerr offload enable pdev id %d\n", pdev_id);
return ret;
}
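/* Request the firmware to tear down a block ack session: a DELBA for the
* given peer/TID carrying the initiator role and reason code.
*/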
int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 initiator, u32 reason)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_delba_send_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_delba_send_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DELBA_SEND_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
ether_addr_copy(cmd->peer_macaddr.addr, mac);
cmd->tid = tid;
cmd->initiator = initiator;
cmd->reasoncode = reason;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_DELBA_SEND_CMDID cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
vdev_id, mac, tid, initiator, reason);
return ret;
}
int ath11k_wmi_addba_set_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 status)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_addba_setresponse_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SETRESPONSE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
ether_addr_copy(cmd->peer_macaddr.addr, mac);
cmd->tid = tid;
cmd->statuscode = status;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
vdev_id, mac, tid, status);
return ret;
}
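/* Request the firmware to initiate an ADDBA handshake with the peer for the
* given TID, advertising the requested reorder buffer size.
*/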
int ath11k_wmi_addba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 buf_size)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_addba_send_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_addba_send_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SEND_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
ether_addr_copy(cmd->peer_macaddr.addr, mac);
cmd->tid = tid;
cmd->buffersize = buf_size;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_ADDBA_SEND_CMDID cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
vdev_id, mac, tid, buf_size);
return ret;
}
int ath11k_wmi_addba_clear_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_addba_clear_resp_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_CLEAR_RESP_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
ether_addr_copy(cmd->peer_macaddr.addr, mac);
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd addba clear resp vdev_id 0x%X mac_addr %pM\n",
vdev_id, mac);
return ret;
}
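/* Enable or disable packet log capture filtering for a single peer MAC
* address; the command carries exactly one filter info entry (num_mac = 1).
*/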
int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_pdev_pktlog_filter_cmd *cmd;
struct wmi_pdev_pktlog_filter_info *info;
struct sk_buff *skb;
struct wmi_tlv *tlv;
void *ptr;
int ret, len;
len = sizeof(*cmd) + sizeof(*info) + TLV_HDR_SIZE;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_pktlog_filter_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
cmd->num_mac = 1;
cmd->enable = enable;
ptr = skb->data + sizeof(*cmd);
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*info));
ptr += TLV_HDR_SIZE;
info = ptr;
ether_addr_copy(info->peer_macaddr.addr, addr);
info->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO) |
FIELD_PREP(WMI_TLV_LEN,
sizeof(*info) - TLV_HDR_SIZE);
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_PDEV_PKTLOG_FILTER_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev pktlog filter");
return ret;
}
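/* Send the initial regulatory hint to firmware. The country may be given as
* an ISO 3166 alpha2 string, a numeric country code or a regdomain id;
* init_cc_params.flags selects which of the three is used.
*/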
int
ath11k_wmi_send_init_country_cmd(struct ath11k *ar,
struct wmi_init_country_params init_cc_params)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_init_country_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_init_country_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_SET_INIT_COUNTRY_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->pdev_id = ar->pdev->pdev_id;
switch (init_cc_params.flags) {
case ALPHA_IS_SET:
cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_ALPHA;
memcpy((u8 *)&cmd->cc_info.alpha2,
init_cc_params.cc_info.alpha2, 3);
break;
case CC_IS_SET:
cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE;
cmd->cc_info.country_code = init_cc_params.cc_info.country_code;
break;
case REGDMN_IS_SET:
cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_REGDOMAIN;
cmd->cc_info.regdom_id = init_cc_params.cc_info.regdom_id;
break;
default:
ath11k_warn(ar->ab, "unknown cc params flags: 0x%x",
init_cc_params.flags);
ret = -EINVAL;
goto err;
}
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_SET_INIT_COUNTRY_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_SET_INIT_COUNTRY CMD :%d\n",
ret);
goto err;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd set init country");
return 0;
err:
dev_kfree_skb(skb);
return ret;
}
int ath11k_wmi_send_set_current_country_cmd(struct ath11k *ar,
struct wmi_set_current_country_params *param)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_set_current_country_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_set_current_country_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SET_CURRENT_COUNTRY_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->pdev_id = ar->pdev->pdev_id;
memcpy(&cmd->new_alpha2, ¶m->alpha2, 3);
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret);
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd set current country pdev id %d alpha2 %c%c\n",
ar->pdev->pdev_id,
param->alpha2[0],
param->alpha2[1]);
return ret;
}
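/* Configure firmware thermal throttling: a fixed request header followed by
* a TLV array of THERMAL_LEVELS entries, each describing a temperature
* window (low/high watermark), a TX duty-cycle off percentage and a priority.
*/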
int
ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar,
struct thermal_mitigation_params *param)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_therm_throt_config_request_cmd *cmd;
struct wmi_therm_throt_level_config_info *lvl_conf;
struct wmi_tlv *tlv;
struct sk_buff *skb;
int i, ret, len;
len = sizeof(*cmd) + TLV_HDR_SIZE +
THERMAL_LEVELS * sizeof(struct wmi_therm_throt_level_config_info);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_therm_throt_config_request_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_CONFIG_REQUEST) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->pdev_id = ar->pdev->pdev_id;
cmd->enable = param->enable;
cmd->dc = param->dc;
cmd->dc_per_event = param->dc_per_event;
cmd->therm_throt_levels = THERMAL_LEVELS;
tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
FIELD_PREP(WMI_TLV_LEN,
(THERMAL_LEVELS *
sizeof(struct wmi_therm_throt_level_config_info)));
lvl_conf = (struct wmi_therm_throt_level_config_info *)(skb->data +
sizeof(*cmd) +
TLV_HDR_SIZE);
for (i = 0; i < THERMAL_LEVELS; i++) {
lvl_conf->tlv_header =
FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*lvl_conf) - TLV_HDR_SIZE);
lvl_conf->temp_lwm = param->levelconf[i].tmplwm;
lvl_conf->temp_hwm = param->levelconf[i].tmphwm;
lvl_conf->dc_off_percent = param->levelconf[i].dcoffpercent;
lvl_conf->prio = param->levelconf[i].priority;
lvl_conf++;
}
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_THERM_THROT_SET_CONF_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to send THERM_THROT_SET_CONF cmd\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd therm throt set conf pdev_id %d enable %d dc %d dc_per_event %x levels %d\n",
ar->pdev->pdev_id, param->enable, param->dc,
param->dc_per_event, THERMAL_LEVELS);
return ret;
}
int ath11k_wmi_send_11d_scan_start_cmd(struct ath11k *ar,
struct wmi_11d_scan_start_params *param)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_11d_scan_start_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_11d_scan_start_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_START_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = param->vdev_id;
cmd->scan_period_msec = param->scan_period_msec;
cmd->start_interval_msec = param->start_interval_msec;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret);
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd 11d scan start vdev id %d period %d ms internal %d ms\n",
cmd->vdev_id,
cmd->scan_period_msec,
cmd->start_interval_msec);
return ret;
}
int ath11k_wmi_send_11d_scan_stop_cmd(struct ath11k *ar, u32 vdev_id)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_11d_scan_stop_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_11d_scan_stop_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_STOP_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret);
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd 11d scan stop vdev id %d\n",
cmd->vdev_id);
return ret;
}
int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_pktlog_enable_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pktlog_enable_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_ENABLE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
cmd->evlist = pktlog_filter;
cmd->enable = ATH11K_WMI_PKTLOG_ENABLE_FORCE;
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_PDEV_PKTLOG_ENABLE_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev pktlog enable");
return ret;
}
int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_pktlog_disable_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pktlog_disable_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_DISABLE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_PDEV_PKTLOG_DISABLE_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev pktlog disable");
return ret;
}
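/* Fill a TWT enable request with the driver's default tuning values;
* MBSSID support is left disabled (see the TODO below).
*/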
void ath11k_wmi_fill_default_twt_params(struct wmi_twt_enable_params *twt_params)
{
twt_params->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS;
twt_params->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE;
twt_params->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP;
twt_params->congestion_thresh_teardown =
ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN;
twt_params->congestion_thresh_critical =
ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL;
twt_params->interference_thresh_teardown =
ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN;
twt_params->interference_thresh_setup =
ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP;
twt_params->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP;
twt_params->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN;
twt_params->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS;
twt_params->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS;
twt_params->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT;
twt_params->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL;
twt_params->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL;
twt_params->remove_sta_slot_interval =
ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL;
/* TODO add MBSSID support */
twt_params->mbss_support = 0;
}
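/* Enable TWT (Target Wake Time) in firmware for a pdev and program the
* scheduler tunables. ar->twt_enabled tracks the resulting state.
*/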
int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id,
struct wmi_twt_enable_params *params)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_twt_enable_params_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ENABLE_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = pdev_id;
cmd->sta_cong_timer_ms = params->sta_cong_timer_ms;
cmd->default_slot_size = params->default_slot_size;
cmd->congestion_thresh_setup = params->congestion_thresh_setup;
cmd->congestion_thresh_teardown = params->congestion_thresh_teardown;
cmd->congestion_thresh_critical = params->congestion_thresh_critical;
cmd->interference_thresh_teardown = params->interference_thresh_teardown;
cmd->interference_thresh_setup = params->interference_thresh_setup;
cmd->min_no_sta_setup = params->min_no_sta_setup;
cmd->min_no_sta_teardown = params->min_no_sta_teardown;
cmd->no_of_bcast_mcast_slots = params->no_of_bcast_mcast_slots;
cmd->min_no_twt_slots = params->min_no_twt_slots;
cmd->max_no_sta_twt = params->max_no_sta_twt;
cmd->mode_check_interval = params->mode_check_interval;
cmd->add_sta_slot_interval = params->add_sta_slot_interval;
cmd->remove_sta_slot_interval = params->remove_sta_slot_interval;
cmd->mbss_support = params->mbss_support;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ENABLE_CMDID);
if (ret) {
ath11k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
dev_kfree_skb(skb);
return ret;
}
ar->twt_enabled = 1;
ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd twt enable");
return 0;
}
int
ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_twt_disable_params_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DISABLE_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = pdev_id;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DISABLE_CMDID);
if (ret) {
ath11k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
dev_kfree_skb(skb);
return ret;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd twt disable");
ar->twt_enabled = 0;
return 0;
}
int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar,
struct wmi_twt_add_dialog_params *params)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_twt_add_dialog_params_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_twt_add_dialog_params_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ADD_DIALOG_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->vdev_id = params->vdev_id;
ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
cmd->dialog_id = params->dialog_id;
cmd->wake_intvl_us = params->wake_intvl_us;
cmd->wake_intvl_mantis = params->wake_intvl_mantis;
cmd->wake_dura_us = params->wake_dura_us;
cmd->sp_offset_us = params->sp_offset_us;
cmd->flags = params->twt_cmd;
if (params->flag_bcast)
cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_BCAST;
if (params->flag_trigger)
cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_TRIGGER;
if (params->flag_flow_type)
cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE;
if (params->flag_protection)
cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_PROTECTION;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ADD_DIALOG_CMDID);
if (ret) {
ath11k_warn(ab,
"failed to send wmi command to add twt dialog: %d",
ret);
dev_kfree_skb(skb);
return ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd twt add dialog vdev %u dialog id %u wake interval %u mantissa %u wake duration %u service period offset %u flags 0x%x\n",
cmd->vdev_id, cmd->dialog_id, cmd->wake_intvl_us,
cmd->wake_intvl_mantis, cmd->wake_dura_us, cmd->sp_offset_us,
cmd->flags);
return 0;
}
int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar,
struct wmi_twt_del_dialog_params *params)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_twt_del_dialog_params_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_twt_del_dialog_params_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DEL_DIALOG_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->vdev_id = params->vdev_id;
ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
cmd->dialog_id = params->dialog_id;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DEL_DIALOG_CMDID);
if (ret) {
ath11k_warn(ab,
"failed to send wmi command to delete twt dialog: %d",
ret);
dev_kfree_skb(skb);
return ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd twt del dialog vdev %u dialog id %u\n",
cmd->vdev_id, cmd->dialog_id);
return 0;
}
int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar,
struct wmi_twt_pause_dialog_params *params)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_twt_pause_dialog_params_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_twt_pause_dialog_params_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_TWT_PAUSE_DIALOG_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->vdev_id = params->vdev_id;
ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
cmd->dialog_id = params->dialog_id;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_PAUSE_DIALOG_CMDID);
if (ret) {
ath11k_warn(ab,
"failed to send wmi command to pause twt dialog: %d",
ret);
dev_kfree_skb(skb);
return ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd twt pause dialog vdev %u dialog id %u\n",
cmd->vdev_id, cmd->dialog_id);
return 0;
}
int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar,
struct wmi_twt_resume_dialog_params *params)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_twt_resume_dialog_params_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_twt_resume_dialog_params_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_TWT_RESUME_DIALOG_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->vdev_id = params->vdev_id;
ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
cmd->dialog_id = params->dialog_id;
cmd->sp_offset_us = params->sp_offset_us;
cmd->next_twt_size = params->next_twt_size;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_RESUME_DIALOG_CMDID);
if (ret) {
ath11k_warn(ab,
"failed to send wmi command to resume twt dialog: %d",
ret);
dev_kfree_skb(skb);
return ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd twt resume dialog vdev %u dialog id %u service period offset %u next twt subfield size %u\n",
cmd->vdev_id, cmd->dialog_id, cmd->sp_offset_us,
cmd->next_twt_size);
return 0;
}
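/* Push the HE OBSS PD (spatial reuse) settings from mac80211, i.e. the
* enable flag and the min/max OBSS PD offsets, into firmware for the given
* vdev.
*/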
int
ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
struct ieee80211_he_obss_pd *he_obss_pd)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_obss_spatial_reuse_params_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->enable = he_obss_pd->enable;
cmd->obss_min = he_obss_pd->min_offset;
cmd->obss_max = he_obss_pd->max_offset;
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
if (ret) {
ath11k_warn(ab,
"Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID");
dev_kfree_skb(skb);
return ret;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd pdev obss pd spatial reuse");
return 0;
}
int
ath11k_wmi_pdev_set_srg_bss_color_bitmap(struct ath11k *ar, u32 *bitmap)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = ar->pdev->pdev_id;
memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID);
if (ret) {
ath11k_warn(ab,
"failed to send WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID");
dev_kfree_skb(skb);
return ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd pdev set srg bss color bitmap pdev_id %d bss color bitmap %08x %08x\n",
cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
return 0;
}
int
ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(struct ath11k *ar, u32 *bitmap)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = ar->pdev->pdev_id;
memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID);
if (ret) {
ath11k_warn(ab,
"failed to send WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID");
dev_kfree_skb(skb);
return ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd pdev set srg partial bssid bitmap pdev_id %d partial bssid bitmap %08x %08x\n",
cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
return 0;
}
int
ath11k_wmi_pdev_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_PDEV_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = ar->pdev->pdev_id;
memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID);
if (ret) {
ath11k_warn(ab,
"failed to send WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID");
dev_kfree_skb(skb);
return ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd pdev set srg obsscolor enable pdev_id %d bss color enable bitmap %08x %08x\n",
cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
return 0;
}
int
ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = ar->pdev->pdev_id;
memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID);
if (ret) {
ath11k_warn(ab,
"failed to send WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID");
dev_kfree_skb(skb);
return ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd pdev set srg obss bssid enable bitmap pdev_id %d bssid enable bitmap %08x %08x\n",
cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
return 0;
}
int
ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = ar->pdev->pdev_id;
memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID);
if (ret) {
ath11k_warn(ab,
"failed to send WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID");
dev_kfree_skb(skb);
return ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd pdev set non srg obss color enable bitmap pdev_id %d bss color enable bitmap %08x %08x\n",
cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
return 0;
}
int
ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = ar->pdev->pdev_id;
memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID);
if (ret) {
ath11k_warn(ab,
"failed to send WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID");
dev_kfree_skb(skb);
return ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd pdev set non srg obss bssid enable bitmap pdev_id %d bssid enable bitmap %08x %08x\n",
cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
return 0;
}
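/* Arm or disarm BSS color collision detection on a vdev: evt_type selects
* detection vs. disable, and the command carries the current BSS color, the
* detection period and a fixed scan period.
*/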
int
ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id,
u8 bss_color, u32 period,
bool enable)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_obss_color_collision_cfg_params_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->evt_type = enable ? ATH11K_OBSS_COLOR_COLLISION_DETECTION :
ATH11K_OBSS_COLOR_COLLISION_DETECTION_DISABLE;
cmd->current_bss_color = bss_color;
cmd->detection_period_ms = period;
cmd->scan_period_ms = ATH11K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS;
cmd->free_slot_expiry_time_ms = 0;
cmd->flags = 0;
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
if (ret) {
ath11k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID");
dev_kfree_skb(skb);
return ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd obss color collision det config id %d type %d bss_color %d detect_period %d scan_period %d\n",
cmd->vdev_id, cmd->evt_type, cmd->current_bss_color,
cmd->detection_period_ms, cmd->scan_period_ms);
return 0;
}
int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id,
bool enable)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_bss_color_change_enable_params_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BSS_COLOR_CHANGE_ENABLE) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->enable = enable ? 1 : 0;
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
if (ret) {
ath11k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID");
dev_kfree_skb(skb);
return ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd bss color change enable id %d enable %d\n",
cmd->vdev_id, cmd->enable);
return 0;
}
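/* Install a FILS discovery frame template on the vdev. The template bytes
* are appended after the fixed command structure as a 4-byte-aligned
* WMI_TAG_ARRAY_BYTE TLV.
*/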
int ath11k_wmi_fils_discovery_tmpl(struct ath11k *ar, u32 vdev_id,
struct sk_buff *tmpl)
{
struct wmi_tlv *tlv;
struct sk_buff *skb;
void *ptr;
int ret, len;
size_t aligned_len;
struct wmi_fils_discovery_tmpl_cmd *cmd;
aligned_len = roundup(tmpl->len, 4);
len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"vdev %i set FILS discovery template\n", vdev_id);
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_FILS_DISCOVERY_TMPL_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->buf_len = tmpl->len;
ptr = skb->data + sizeof(*cmd);
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
FIELD_PREP(WMI_TLV_LEN, aligned_len);
memcpy(tlv->value, tmpl->data, tmpl->len);
ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"WMI vdev %i failed to send FILS discovery template command\n",
vdev_id);
dev_kfree_skb(skb);
return ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd fils discovery tmpl");
return 0;
}
int ath11k_wmi_probe_resp_tmpl(struct ath11k *ar, u32 vdev_id,
struct sk_buff *tmpl)
{
struct wmi_probe_tmpl_cmd *cmd;
struct wmi_bcn_prb_info *probe_info;
struct wmi_tlv *tlv;
struct sk_buff *skb;
void *ptr;
int ret, len;
size_t aligned_len = roundup(tmpl->len, 4);
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"vdev %i set probe response template\n", vdev_id);
len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PRB_TMPL_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->buf_len = tmpl->len;
ptr = skb->data + sizeof(*cmd);
probe_info = ptr;
len = sizeof(*probe_info);
probe_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_BCN_PRB_INFO) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
probe_info->caps = 0;
probe_info->erp = 0;
ptr += sizeof(*probe_info);
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
FIELD_PREP(WMI_TLV_LEN, aligned_len);
memcpy(tlv->value, tmpl->data, tmpl->len);
ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"WMI vdev %i failed to send probe response template command\n",
vdev_id);
dev_kfree_skb(skb);
return ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd ");
return 0;
}
int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval,
bool unsol_bcast_probe_resp_enabled)
{
struct sk_buff *skb;
int ret, len;
struct wmi_fils_discovery_cmd *cmd;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"vdev %i set %s interval to %u TU\n",
vdev_id, unsol_bcast_probe_resp_enabled ?
"unsolicited broadcast probe response" : "FILS discovery",
interval);
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_fils_discovery_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ENABLE_FILS_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->interval = interval;
cmd->config = unsol_bcast_probe_resp_enabled;
ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"WMI vdev %i failed to send FILS discovery enable/disable command\n",
vdev_id);
dev_kfree_skb(skb);
return ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd enable fils");
return 0;
}
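/* Handle the OBSS color collision event from firmware: look up the vif by
* vdev id under RCU and forward detected collisions to mac80211; the other
* event types are intentionally ignored here.
*/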
static void
ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *skb)
{
const void **tb;
const struct wmi_obss_color_collision_event *ev;
struct ath11k_vif *arvif;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "event obss color collision");
rcu_read_lock();
ev = tb[WMI_TAG_OBSS_COLOR_COLLISION_EVT];
if (!ev) {
ath11k_warn(ab, "failed to fetch obss color collision ev");
goto exit;
}
arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
if (!arvif) {
ath11k_warn(ab, "failed to find arvif with vedv id %d in obss_color_collision_event\n",
ev->vdev_id);
goto exit;
}
switch (ev->evt_type) {
case WMI_BSS_COLOR_COLLISION_DETECTION:
/* This handler runs under rcu_read_lock(), so the notification must
* not use a sleeping allocation: pass GFP_ATOMIC, not GFP_KERNEL.
*/
ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap,
GFP_ATOMIC);
ath11k_dbg(ab, ATH11K_DBG_WMI,
"OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
break;
case WMI_BSS_COLOR_COLLISION_DISABLE:
case WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY:
case WMI_BSS_COLOR_FREE_SLOT_AVAILABLE:
break;
default:
ath11k_warn(ab, "received unknown obss color collision detection event\n");
}
exit:
kfree(tb);
rcu_read_unlock();
}
static void
ath11k_fill_band_to_mac_param(struct ath11k_base *soc,
struct wmi_host_pdev_band_to_mac *band_to_mac)
{
u8 i;
struct ath11k_hal_reg_capabilities_ext *hal_reg_cap;
struct ath11k_pdev *pdev;
for (i = 0; i < soc->num_radios; i++) {
pdev = &soc->pdevs[i];
hal_reg_cap = &soc->hal_reg_cap[i];
band_to_mac[i].pdev_id = pdev->pdev_id;
switch (pdev->cap.supported_bands) {
case WMI_HOST_WLAN_2G_5G_CAP:
band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
break;
case WMI_HOST_WLAN_2G_CAP:
band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
band_to_mac[i].end_freq = hal_reg_cap->high_2ghz_chan;
break;
case WMI_HOST_WLAN_5G_CAP:
band_to_mac[i].start_freq = hal_reg_cap->low_5ghz_chan;
band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
break;
default:
break;
}
}
}
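/* Copy the host-chosen target resource configuration into the WMI
* wire-format structure carried in WMI_INIT_CMDID, including the REG_CC_EXT
* host service flag and the EMA limits.
*/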
static void
ath11k_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg,
struct target_resource_config *tg_cfg)
{
wmi_cfg->num_vdevs = tg_cfg->num_vdevs;
wmi_cfg->num_peers = tg_cfg->num_peers;
wmi_cfg->num_offload_peers = tg_cfg->num_offload_peers;
wmi_cfg->num_offload_reorder_buffs = tg_cfg->num_offload_reorder_buffs;
wmi_cfg->num_peer_keys = tg_cfg->num_peer_keys;
wmi_cfg->num_tids = tg_cfg->num_tids;
wmi_cfg->ast_skid_limit = tg_cfg->ast_skid_limit;
wmi_cfg->tx_chain_mask = tg_cfg->tx_chain_mask;
wmi_cfg->rx_chain_mask = tg_cfg->rx_chain_mask;
wmi_cfg->rx_timeout_pri[0] = tg_cfg->rx_timeout_pri[0];
wmi_cfg->rx_timeout_pri[1] = tg_cfg->rx_timeout_pri[1];
wmi_cfg->rx_timeout_pri[2] = tg_cfg->rx_timeout_pri[2];
wmi_cfg->rx_timeout_pri[3] = tg_cfg->rx_timeout_pri[3];
wmi_cfg->rx_decap_mode = tg_cfg->rx_decap_mode;
wmi_cfg->scan_max_pending_req = tg_cfg->scan_max_pending_req;
wmi_cfg->bmiss_offload_max_vdev = tg_cfg->bmiss_offload_max_vdev;
wmi_cfg->roam_offload_max_vdev = tg_cfg->roam_offload_max_vdev;
wmi_cfg->roam_offload_max_ap_profiles =
tg_cfg->roam_offload_max_ap_profiles;
wmi_cfg->num_mcast_groups = tg_cfg->num_mcast_groups;
wmi_cfg->num_mcast_table_elems = tg_cfg->num_mcast_table_elems;
wmi_cfg->mcast2ucast_mode = tg_cfg->mcast2ucast_mode;
wmi_cfg->tx_dbg_log_size = tg_cfg->tx_dbg_log_size;
wmi_cfg->num_wds_entries = tg_cfg->num_wds_entries;
wmi_cfg->dma_burst_size = tg_cfg->dma_burst_size;
wmi_cfg->mac_aggr_delim = tg_cfg->mac_aggr_delim;
wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
tg_cfg->rx_skip_defrag_timeout_dup_detection_check;
wmi_cfg->vow_config = tg_cfg->vow_config;
wmi_cfg->gtk_offload_max_vdev = tg_cfg->gtk_offload_max_vdev;
wmi_cfg->num_msdu_desc = tg_cfg->num_msdu_desc;
wmi_cfg->max_frag_entries = tg_cfg->max_frag_entries;
wmi_cfg->num_tdls_vdevs = tg_cfg->num_tdls_vdevs;
wmi_cfg->num_tdls_conn_table_entries =
tg_cfg->num_tdls_conn_table_entries;
wmi_cfg->beacon_tx_offload_max_vdev =
tg_cfg->beacon_tx_offload_max_vdev;
wmi_cfg->num_multicast_filter_entries =
tg_cfg->num_multicast_filter_entries;
wmi_cfg->num_wow_filters = tg_cfg->num_wow_filters;
wmi_cfg->num_keep_alive_pattern = tg_cfg->num_keep_alive_pattern;
wmi_cfg->keep_alive_pattern_size = tg_cfg->keep_alive_pattern_size;
wmi_cfg->max_tdls_concurrent_sleep_sta =
tg_cfg->max_tdls_concurrent_sleep_sta;
wmi_cfg->max_tdls_concurrent_buffer_sta =
tg_cfg->max_tdls_concurrent_buffer_sta;
wmi_cfg->wmi_send_separate = tg_cfg->wmi_send_separate;
wmi_cfg->num_ocb_vdevs = tg_cfg->num_ocb_vdevs;
wmi_cfg->num_ocb_channels = tg_cfg->num_ocb_channels;
wmi_cfg->num_ocb_schedules = tg_cfg->num_ocb_schedules;
wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size;
wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters;
wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id;
wmi_cfg->flag1 = tg_cfg->flag1;
wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support;
wmi_cfg->sched_params = tg_cfg->sched_params;
wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count;
wmi_cfg->twt_ap_sta_count = tg_cfg->twt_ap_sta_count;
wmi_cfg->host_service_flags &=
~(1 << WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT);
wmi_cfg->host_service_flags |= (tg_cfg->is_reg_cc_ext_event_supported <<
WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT);
wmi_cfg->flags2 = WMI_RSRC_CFG_FLAG2_CALC_NEXT_DTIM_COUNT_SET;
wmi_cfg->ema_max_vap_cnt = tg_cfg->ema_max_vap_cnt;
wmi_cfg->ema_max_profile_period = tg_cfg->ema_max_profile_period;
}
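/* Build and send WMI_INIT_CMDID: the resource config TLV, the host memory
* chunk array (its TLV header is emitted even for a zero count) and, unless
* hw_mode_id is WMI_HOST_HW_MODE_MAX, a hw mode TLV with its band-to-mac
* mapping array.
*/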
static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi,
struct wmi_init_cmd_param *param)
{
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct sk_buff *skb;
struct wmi_init_cmd *cmd;
struct wmi_resource_config *cfg;
struct wmi_pdev_set_hw_mode_cmd_param *hw_mode;
struct wmi_pdev_band_to_mac *band_to_mac;
struct wlan_host_mem_chunk *host_mem_chunks;
struct wmi_tlv *tlv;
int ret; /* holds negative errnos from ath11k_wmi_cmd_send() */
size_t len;
void *ptr;
u32 hw_mode_len = 0;
u16 idx;
if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX)
hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
(param->num_band_to_mac * sizeof(*band_to_mac));
len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
(param->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_init_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_INIT_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
ptr = skb->data + sizeof(*cmd);
cfg = ptr;
ath11k_wmi_copy_resource_config(cfg, param->res_cfg);
cfg->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_RESOURCE_CONFIG) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cfg) - TLV_HDR_SIZE);
ptr += sizeof(*cfg);
host_mem_chunks = ptr + TLV_HDR_SIZE;
len = sizeof(struct wlan_host_mem_chunk);
for (idx = 0; idx < param->num_mem_chunks; ++idx) {
host_mem_chunks[idx].tlv_header =
FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_WLAN_HOST_MEMORY_CHUNK) |
FIELD_PREP(WMI_TLV_LEN, len);
host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr;
host_mem_chunks[idx].size = param->mem_chunks[idx].len;
host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id;
ath11k_dbg(ab, ATH11K_DBG_WMI,
"host mem chunk req_id %d paddr 0x%llx len %d\n",
param->mem_chunks[idx].req_id,
(u64)param->mem_chunks[idx].paddr,
param->mem_chunks[idx].len);
}
cmd->num_host_mem_chunks = param->num_mem_chunks;
len = sizeof(struct wlan_host_mem_chunk) * param->num_mem_chunks;
/* The host memory chunk array TLV header is emitted even when num_mem_chunks (and hence len) is zero */
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, len);
ptr += TLV_HDR_SIZE + len;
if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
hw_mode = (struct wmi_pdev_set_hw_mode_cmd_param *)ptr;
hw_mode->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_PDEV_SET_HW_MODE_CMD) |
FIELD_PREP(WMI_TLV_LEN,
sizeof(*hw_mode) - TLV_HDR_SIZE);
hw_mode->hw_mode_index = param->hw_mode_id;
hw_mode->num_band_to_mac = param->num_band_to_mac;
ptr += sizeof(*hw_mode);
len = param->num_band_to_mac * sizeof(*band_to_mac);
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, len);
ptr += TLV_HDR_SIZE;
len = sizeof(*band_to_mac);
for (idx = 0; idx < param->num_band_to_mac; idx++) {
band_to_mac = (void *)ptr;
band_to_mac->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_PDEV_BAND_TO_MAC) |
FIELD_PREP(WMI_TLV_LEN,
len - TLV_HDR_SIZE);
band_to_mac->pdev_id = param->band_to_mac[idx].pdev_id;
band_to_mac->start_freq =
param->band_to_mac[idx].start_freq;
band_to_mac->end_freq =
param->band_to_mac[idx].end_freq;
ptr += sizeof(*band_to_mac);
}
}
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
if (ret) {
ath11k_warn(ab, "failed to send WMI_INIT_CMDID\n");
dev_kfree_skb(skb);
return ret;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd wmi init");
return 0;
}
int ath11k_wmi_pdev_lro_cfg(struct ath11k *ar,
int pdev_id)
{
struct ath11k_wmi_pdev_lro_config_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct ath11k_wmi_pdev_lro_config_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_LRO_INFO_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
get_random_bytes(cmd->th_4, sizeof(u32) * ATH11K_IPV4_TH_SEED_SIZE);
get_random_bytes(cmd->th_6, sizeof(u32) * ATH11K_IPV6_TH_SEED_SIZE);
cmd->pdev_id = pdev_id;
ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send lro cfg req wmi cmd\n");
goto err;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd lro config pdev_id 0x%x\n", pdev_id);
return 0;
err:
dev_kfree_skb(skb);
return ret;
}
int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab)
{
unsigned long time_left;
time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
WMI_SERVICE_READY_TIMEOUT_HZ);
if (!time_left)
return -ETIMEDOUT;
return 0;
}
int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab)
{
unsigned long time_left;
time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
WMI_SERVICE_READY_TIMEOUT_HZ);
if (!time_left)
return -ETIMEDOUT;
return 0;
}
int ath11k_wmi_set_hw_mode(struct ath11k_base *ab,
enum wmi_host_hw_mode_config_type mode)
{
struct wmi_pdev_set_hw_mode_cmd_param *cmd;
struct sk_buff *skb;
struct ath11k_wmi_base *wmi_ab = &ab->wmi_ab;
int len;
int ret;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->pdev_id = WMI_PDEV_ID_SOC;
cmd->hw_mode_index = mode;
ret = ath11k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
if (ret) {
ath11k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
dev_kfree_skb(skb);
return ret;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd pdev set hw mode %d", cmd->hw_mode_index);
return 0;
}
int ath11k_wmi_cmd_init(struct ath11k_base *ab)
{
struct ath11k_wmi_base *wmi_sc = &ab->wmi_ab;
struct wmi_init_cmd_param init_param;
struct target_resource_config config;
memset(&init_param, 0, sizeof(init_param));
memset(&config, 0, sizeof(config));
ab->hw_params.hw_ops->wmi_init_config(ab, &config);
if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
ab->wmi_ab.svc_map))
config.is_reg_cc_ext_event_supported = 1;
memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config));
init_param.res_cfg = &wmi_sc->wlan_resource_config;
init_param.num_mem_chunks = wmi_sc->num_mem_chunks;
init_param.hw_mode_id = wmi_sc->preferred_hw_mode;
init_param.mem_chunks = wmi_sc->mem_chunks;
if (ab->hw_params.single_pdev_only)
init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX;
init_param.num_band_to_mac = ab->num_radios;
ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac);
return ath11k_init_cmd_send(&wmi_sc->wmi[0], &init_param);
}
int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar,
struct ath11k_wmi_vdev_spectral_conf_param *param)
{
struct ath11k_wmi_vdev_spectral_conf_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct ath11k_wmi_vdev_spectral_conf_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
memcpy(&cmd->param, param, sizeof(*param));
ret = ath11k_wmi_cmd_send(ar->wmi, skb,
WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send spectral scan config wmi cmd\n");
goto err;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd vdev spectral scan configure vdev_id 0x%x\n",
param->vdev_id);
return 0;
err:
dev_kfree_skb(skb);
return ret;
}
int ath11k_wmi_vdev_spectral_enable(struct ath11k *ar, u32 vdev_id,
u32 trigger, u32 enable)
{
struct ath11k_wmi_vdev_spectral_enable_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct ath11k_wmi_vdev_spectral_enable_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->trigger_cmd = trigger;
cmd->enable_cmd = enable;
ret = ath11k_wmi_cmd_send(ar->wmi, skb,
WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send spectral enable wmi cmd\n");
goto err;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd vdev spectral scan enable vdev id 0x%x\n",
vdev_id);
return 0;
err:
dev_kfree_skb(skb);
return ret;
}
int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k *ar,
struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *param)
{
struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DMA_RING_CFG_REQ) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->pdev_id = param->pdev_id;
cmd->module_id = param->module_id;
cmd->base_paddr_lo = param->base_paddr_lo;
cmd->base_paddr_hi = param->base_paddr_hi;
cmd->head_idx_paddr_lo = param->head_idx_paddr_lo;
cmd->head_idx_paddr_hi = param->head_idx_paddr_hi;
cmd->tail_idx_paddr_lo = param->tail_idx_paddr_lo;
cmd->tail_idx_paddr_hi = param->tail_idx_paddr_hi;
cmd->num_elems = param->num_elems;
cmd->buf_size = param->buf_size;
cmd->num_resp_per_event = param->num_resp_per_event;
cmd->event_timeout_ms = param->event_timeout_ms;
ret = ath11k_wmi_cmd_send(ar->wmi, skb,
WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send dma ring cfg req wmi cmd\n");
goto err;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd pdev dma ring cfg req pdev_id 0x%x\n",
param->pdev_id);
return 0;
err:
dev_kfree_skb(skb);
return ret;
}
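/* TLV iterators for the DMA buffer release event: count the buffer release
* entries and the spectral metadata entries against the limits announced in
* the fixed parameter structure.
*/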
static int ath11k_wmi_tlv_dma_buf_entry_parse(struct ath11k_base *soc,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_dma_buf_release_parse *parse = data;
if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
return -EPROTO;
if (parse->num_buf_entry >= parse->fixed.num_buf_release_entry)
return -ENOBUFS;
parse->num_buf_entry++;
return 0;
}
static int ath11k_wmi_tlv_dma_buf_meta_parse(struct ath11k_base *soc,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_dma_buf_release_parse *parse = data;
if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
return -EPROTO;
if (parse->num_meta >= parse->fixed.num_meta_data_entry)
return -ENOBUFS;
parse->num_meta++;
return 0;
}
static int ath11k_wmi_tlv_dma_buf_parse(struct ath11k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_dma_buf_release_parse *parse = data;
int ret;
switch (tag) {
case WMI_TAG_DMA_BUF_RELEASE:
memcpy(&parse->fixed, ptr,
sizeof(struct ath11k_wmi_dma_buf_release_fixed_param));
parse->fixed.pdev_id = DP_HW2SW_MACID(parse->fixed.pdev_id);
break;
case WMI_TAG_ARRAY_STRUCT:
if (!parse->buf_entry_done) {
parse->num_buf_entry = 0;
parse->buf_entry = (struct wmi_dma_buf_release_entry *)ptr;
ret = ath11k_wmi_tlv_iter(ab, ptr, len,
ath11k_wmi_tlv_dma_buf_entry_parse,
parse);
if (ret) {
ath11k_warn(ab, "failed to parse dma buf entry tlv %d\n",
ret);
return ret;
}
parse->buf_entry_done = true;
} else if (!parse->meta_data_done) {
parse->num_meta = 0;
parse->meta_data = (struct wmi_dma_buf_release_meta_data *)ptr;
ret = ath11k_wmi_tlv_iter(ab, ptr, len,
ath11k_wmi_tlv_dma_buf_meta_parse,
parse);
if (ret) {
ath11k_warn(ab, "failed to parse dma buf meta tlv %d\n",
ret);
return ret;
}
parse->meta_data_done = true;
}
break;
default:
break;
}
return 0;
}
static void ath11k_wmi_pdev_dma_ring_buf_release_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
struct wmi_tlv_dma_buf_release_parse parse = { };
struct ath11k_dbring_buf_release_event param;
int ret;
ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
ath11k_wmi_tlv_dma_buf_parse,
&parse);
if (ret) {
ath11k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev dma ring buf release");
param.fixed = parse.fixed;
param.buf_entry = parse.buf_entry;
param.num_buf_entry = parse.num_buf_entry;
param.meta_data = parse.meta_data;
param.num_meta = parse.num_meta;
ret = ath11k_dbring_buffer_release_event(ab, &param);
if (ret) {
ath11k_warn(ab, "failed to handle dma buf release event %d\n", ret);
return;
}
}
static int ath11k_wmi_tlv_hw_mode_caps_parse(struct ath11k_base *soc,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
struct wmi_hw_mode_capabilities *hw_mode_cap;
u32 phy_map = 0;
if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
return -EPROTO;
if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->param.num_hw_modes)
return -ENOBUFS;
hw_mode_cap = container_of(ptr, struct wmi_hw_mode_capabilities,
hw_mode_id);
svc_rdy_ext->n_hw_mode_caps++;
phy_map = hw_mode_cap->phy_id_map;
while (phy_map) {
svc_rdy_ext->tot_phy_id++;
phy_map = phy_map >> 1;
}
return 0;
}
static int ath11k_wmi_tlv_hw_mode_caps(struct ath11k_base *soc,
u16 len, const void *ptr, void *data)
{
struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
struct wmi_hw_mode_capabilities *hw_mode_caps;
enum wmi_host_hw_mode_config_type mode, pref;
u32 i;
int ret;
svc_rdy_ext->n_hw_mode_caps = 0;
svc_rdy_ext->hw_mode_caps = (struct wmi_hw_mode_capabilities *)ptr;
ret = ath11k_wmi_tlv_iter(soc, ptr, len,
ath11k_wmi_tlv_hw_mode_caps_parse,
svc_rdy_ext);
if (ret) {
ath11k_warn(soc, "failed to parse tlv %d\n", ret);
return ret;
}
for (i = 0; i < svc_rdy_ext->n_hw_mode_caps; i++) {
hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
mode = hw_mode_caps->hw_mode_id;
/* Defensive check (assumption: ath11k_hw_mode_pri_map is indexed up
* to WMI_HOST_HW_MODE_MAX): skip out-of-range hw mode ids so a bogus
* firmware value cannot index past the priority map.
*/
if (mode > WMI_HOST_HW_MODE_MAX)
continue;
pref = soc->wmi_ab.preferred_hw_mode;
if (ath11k_hw_mode_pri_map[mode] < ath11k_hw_mode_pri_map[pref]) {
svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
soc->wmi_ab.preferred_hw_mode = mode;
}
}
ath11k_dbg(soc, ATH11K_DBG_WMI, "preferred_hw_mode:%d\n",
soc->wmi_ab.preferred_hw_mode);
if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
return -EINVAL;
return 0;
}
static int ath11k_wmi_tlv_mac_phy_caps_parse(struct ath11k_base *soc,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
return -EPROTO;
if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
return -ENOBUFS;
len = min_t(u16, len, sizeof(struct wmi_mac_phy_capabilities));
if (!svc_rdy_ext->n_mac_phy_caps) {
/* Allocate full-sized elements: the memcpy below advances the
* destination by whole structs even if the received TLV length was
* clamped to something shorter.
*/
svc_rdy_ext->mac_phy_caps = kcalloc(svc_rdy_ext->tot_phy_id,
sizeof(struct wmi_mac_phy_capabilities),
GFP_ATOMIC);
if (!svc_rdy_ext->mac_phy_caps)
return -ENOMEM;
}
memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
svc_rdy_ext->n_mac_phy_caps++;
return 0;
}
static int ath11k_wmi_tlv_ext_hal_reg_caps_parse(struct ath11k_base *soc,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
return -EPROTO;
if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->param.num_phy)
return -ENOBUFS;
svc_rdy_ext->n_ext_hal_reg_caps++;
return 0;
}
static int ath11k_wmi_tlv_ext_hal_reg_caps(struct ath11k_base *soc,
u16 len, const void *ptr, void *data)
{
struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0];
struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
struct ath11k_hal_reg_capabilities_ext reg_cap;
int ret;
u32 i;
svc_rdy_ext->n_ext_hal_reg_caps = 0;
svc_rdy_ext->ext_hal_reg_caps = (struct wmi_hal_reg_capabilities_ext *)ptr;
ret = ath11k_wmi_tlv_iter(soc, ptr, len,
ath11k_wmi_tlv_ext_hal_reg_caps_parse,
svc_rdy_ext);
if (ret) {
ath11k_warn(soc, "failed to parse tlv %d\n", ret);
return ret;
}
for (i = 0; i < svc_rdy_ext->param.num_phy; i++) {
ret = ath11k_pull_reg_cap_svc_rdy_ext(wmi_handle,
svc_rdy_ext->soc_hal_reg_caps,
svc_rdy_ext->ext_hal_reg_caps, i,
&reg_cap);
if (ret) {
ath11k_warn(soc, "failed to extract reg cap %d\n", i);
return ret;
}
memcpy(&soc->hal_reg_cap[reg_cap.phy_id],
&reg_cap, sizeof(reg_cap));
}
return 0;
}
static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
u16 len, const void *ptr,
void *data)
{
struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0];
struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
u8 hw_mode_id = svc_rdy_ext->pref_hw_mode_caps.hw_mode_id;
u32 phy_id_map;
int pdev_index = 0;
int ret;
svc_rdy_ext->soc_hal_reg_caps = (struct wmi_soc_hal_reg_capabilities *)ptr;
svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy;
soc->num_radios = 0;
soc->target_pdev_count = 0;
phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map;
while (phy_id_map && soc->num_radios < MAX_RADIOS) {
ret = ath11k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
svc_rdy_ext->hw_caps,
svc_rdy_ext->hw_mode_caps,
svc_rdy_ext->soc_hal_reg_caps,
svc_rdy_ext->mac_phy_caps,
hw_mode_id, soc->num_radios,
&soc->pdevs[pdev_index]);
if (ret) {
ath11k_warn(soc, "failed to extract mac caps, idx :%d\n",
soc->num_radios);
return ret;
}
soc->num_radios++;
/* For QCA6390, save mac_phy capability in the same pdev */
if (soc->hw_params.single_pdev_only)
pdev_index = 0;
else
pdev_index = soc->num_radios;
/* TODO: mac_phy_cap prints */
phy_id_map >>= 1;
}
/* For QCA6390, set num_radios to 1 because the host manages both the
* 2 GHz and 5 GHz radios in a single pdev.
* Set pdev_id to 0, which denotes SoC level.
*/
if (soc->hw_params.single_pdev_only) {
soc->num_radios = 1;
soc->pdevs[0].pdev_id = 0;
}
return 0;
}
static int ath11k_wmi_tlv_dma_ring_caps_parse(struct ath11k_base *soc,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_dma_ring_caps_parse *parse = data;
if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
return -EPROTO;
parse->n_dma_ring_caps++;
return 0;
}
static int ath11k_wmi_alloc_dbring_caps(struct ath11k_base *ab,
u32 num_cap)
{
size_t sz;
void *ptr;
sz = num_cap * sizeof(struct ath11k_dbring_cap);
ptr = kzalloc(sz, GFP_ATOMIC);
if (!ptr)
return -ENOMEM;
ab->db_caps = ptr;
ab->num_db_cap = num_cap;
return 0;
}
static void ath11k_wmi_free_dbring_caps(struct ath11k_base *ab)
{
kfree(ab->db_caps);
ab->db_caps = NULL;
}
static int ath11k_wmi_tlv_dma_ring_caps(struct ath11k_base *ab,
u16 len, const void *ptr, void *data)
{
struct wmi_tlv_dma_ring_caps_parse *dma_caps_parse = data;
struct wmi_dma_ring_capabilities *dma_caps;
struct ath11k_dbring_cap *dir_buff_caps;
int ret;
u32 i;
dma_caps_parse->n_dma_ring_caps = 0;
dma_caps = (struct wmi_dma_ring_capabilities *)ptr;
ret = ath11k_wmi_tlv_iter(ab, ptr, len,
ath11k_wmi_tlv_dma_ring_caps_parse,
dma_caps_parse);
if (ret) {
ath11k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
return ret;
}
if (!dma_caps_parse->n_dma_ring_caps)
return 0;
if (ab->num_db_cap) {
ath11k_warn(ab, "Already processed, so ignoring dma ring caps\n");
return 0;
}
ret = ath11k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
if (ret)
return ret;
dir_buff_caps = ab->db_caps;
for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
if (dma_caps[i].module_id >= WMI_DIRECT_BUF_MAX) {
ath11k_warn(ab, "Invalid module id %d\n", dma_caps[i].module_id);
ret = -EINVAL;
goto free_dir_buff;
}
dir_buff_caps[i].id = dma_caps[i].module_id;
dir_buff_caps[i].pdev_id = DP_HW2SW_MACID(dma_caps[i].pdev_id);
dir_buff_caps[i].min_elem = dma_caps[i].min_elem;
dir_buff_caps[i].min_buf_sz = dma_caps[i].min_buf_sz;
dir_buff_caps[i].min_buf_align = dma_caps[i].min_buf_align;
}
return 0;
free_dir_buff:
ath11k_wmi_free_dbring_caps(ab);
return ret;
}
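/* Top-level parser for the service ready ext event. Tagged TLVs are
 * handled directly; the untagged WMI_TAG_ARRAY_STRUCT TLVs are expected
 * in a fixed order (HW mode caps, MAC/PHY caps, ext HAL reg caps,
 * chainmask combo, chainmask caps, OEM DMA ring caps, DMA ring caps),
 * tracked through the *_done flags in struct wmi_tlv_svc_rdy_ext_parse.
 */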
static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0];
struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
int ret;
switch (tag) {
case WMI_TAG_SERVICE_READY_EXT_EVENT:
ret = ath11k_pull_svc_ready_ext(wmi_handle, ptr,
&svc_rdy_ext->param);
if (ret) {
ath11k_warn(ab, "unable to extract ext params\n");
return ret;
}
break;
case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
svc_rdy_ext->hw_caps = (struct wmi_soc_mac_phy_hw_mode_caps *)ptr;
svc_rdy_ext->param.num_hw_modes = svc_rdy_ext->hw_caps->num_hw_modes;
break;
case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
ret = ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(ab, len, ptr,
svc_rdy_ext);
if (ret)
return ret;
break;
case WMI_TAG_ARRAY_STRUCT:
if (!svc_rdy_ext->hw_mode_done) {
ret = ath11k_wmi_tlv_hw_mode_caps(ab, len, ptr,
svc_rdy_ext);
if (ret)
return ret;
svc_rdy_ext->hw_mode_done = true;
} else if (!svc_rdy_ext->mac_phy_done) {
svc_rdy_ext->n_mac_phy_caps = 0;
ret = ath11k_wmi_tlv_iter(ab, ptr, len,
ath11k_wmi_tlv_mac_phy_caps_parse,
svc_rdy_ext);
if (ret) {
ath11k_warn(ab, "failed to parse tlv %d\n", ret);
return ret;
}
svc_rdy_ext->mac_phy_done = true;
} else if (!svc_rdy_ext->ext_hal_reg_done) {
ret = ath11k_wmi_tlv_ext_hal_reg_caps(ab, len, ptr,
svc_rdy_ext);
if (ret)
return ret;
svc_rdy_ext->ext_hal_reg_done = true;
} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
svc_rdy_ext->mac_phy_chainmask_combo_done = true;
} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
svc_rdy_ext->mac_phy_chainmask_cap_done = true;
} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
svc_rdy_ext->oem_dma_ring_cap_done = true;
} else if (!svc_rdy_ext->dma_ring_cap_done) {
ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr,
&svc_rdy_ext->dma_caps_parse);
if (ret)
return ret;
svc_rdy_ext->dma_ring_cap_done = true;
}
break;
default:
break;
}
return 0;
}
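/* Handle the service ready ext event. service_ready is completed here
 * only when the firmware does not advertise the follow-up ext2 event;
 * otherwise completion is deferred to the ext2 handler.
 */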
static int ath11k_service_ready_ext_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
struct wmi_tlv_svc_rdy_ext_parse svc_rdy_ext = { };
int ret;
ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
ath11k_wmi_tlv_svc_rdy_ext_parse,
&svc_rdy_ext);
if (ret) {
ath11k_warn(ab, "failed to parse tlv %d\n", ret);
goto err;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "event service ready ext");
if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
complete(&ab->wmi_ab.service_ready);
kfree(svc_rdy_ext.mac_phy_caps);
return 0;
err:
ath11k_wmi_free_dbring_caps(ab);
return ret;
}
static int ath11k_wmi_tlv_svc_rdy_ext2_parse(struct ath11k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_svc_rdy_ext2_parse *parse = data;
int ret;
switch (tag) {
case WMI_TAG_ARRAY_STRUCT:
if (!parse->dma_ring_cap_done) {
ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr,
&parse->dma_caps_parse);
if (ret)
return ret;
parse->dma_ring_cap_done = true;
}
break;
default:
break;
}
return 0;
}
static int ath11k_service_ready_ext2_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
struct wmi_tlv_svc_rdy_ext2_parse svc_rdy_ext2 = { };
int ret;
ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
ath11k_wmi_tlv_svc_rdy_ext2_parse,
&svc_rdy_ext2);
if (ret) {
ath11k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
goto err;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "event service ready ext2");
complete(&ab->wmi_ab.service_ready);
return 0;
err:
ath11k_wmi_free_dbring_caps(ab);
return ret;
}
static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buff *skb,
struct wmi_vdev_start_resp_event *vdev_rsp)
{
const void **tb;
const struct wmi_vdev_start_resp_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch vdev start resp ev");
kfree(tb);
return -EPROTO;
}
memset(vdev_rsp, 0, sizeof(*vdev_rsp));
vdev_rsp->vdev_id = ev->vdev_id;
vdev_rsp->requestor_id = ev->requestor_id;
vdev_rsp->resp_type = ev->resp_type;
vdev_rsp->status = ev->status;
vdev_rsp->chain_mask = ev->chain_mask;
vdev_rsp->smps_mode = ev->smps_mode;
vdev_rsp->mac_id = ev->mac_id;
vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams;
vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams;
kfree(tb);
return 0;
}
static void ath11k_print_reg_rule(struct ath11k_base *ab, const char *band,
u32 num_reg_rules,
struct cur_reg_rule *reg_rule_ptr)
{
struct cur_reg_rule *reg_rule = reg_rule_ptr;
u32 count;
ath11k_dbg(ab, ATH11K_DBG_WMI, "number of reg rules in %s band: %d\n",
band, num_reg_rules);
for (count = 0; count < num_reg_rules; count++) {
ath11k_dbg(ab, ATH11K_DBG_WMI,
"reg rule %d: (%d - %d @ %d) (%d, %d) (FLAGS %d)\n",
count + 1, reg_rule->start_freq, reg_rule->end_freq,
reg_rule->max_bw, reg_rule->ant_gain,
reg_rule->reg_power, reg_rule->flags);
reg_rule++;
}
}
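/* Convert an array of packed WMI regulatory rules into cur_reg_rule
 * entries, unpacking the frequency range, bandwidth, power and flag
 * bit-fields. Returns a kcalloc'ed array or NULL on allocation failure.
 */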
static struct cur_reg_rule
*create_reg_rules_from_wmi(u32 num_reg_rules,
struct wmi_regulatory_rule_struct *wmi_reg_rule)
{
struct cur_reg_rule *reg_rule_ptr;
u32 count;
reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr),
GFP_ATOMIC);
if (!reg_rule_ptr)
return NULL;
for (count = 0; count < num_reg_rules; count++) {
reg_rule_ptr[count].start_freq =
FIELD_GET(REG_RULE_START_FREQ,
wmi_reg_rule[count].freq_info);
reg_rule_ptr[count].end_freq =
FIELD_GET(REG_RULE_END_FREQ,
wmi_reg_rule[count].freq_info);
reg_rule_ptr[count].max_bw =
FIELD_GET(REG_RULE_MAX_BW,
wmi_reg_rule[count].bw_pwr_info);
reg_rule_ptr[count].reg_power =
FIELD_GET(REG_RULE_REG_PWR,
wmi_reg_rule[count].bw_pwr_info);
reg_rule_ptr[count].ant_gain =
FIELD_GET(REG_RULE_ANT_GAIN,
wmi_reg_rule[count].bw_pwr_info);
reg_rule_ptr[count].flags =
FIELD_GET(REG_RULE_FLAGS,
wmi_reg_rule[count].flag_info);
}
return reg_rule_ptr;
}
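/* Parse the (non-ext) regulatory channel list event: copy the country
 * info and per-band bandwidth limits into reg_info, then build the
 * 2 GHz and 5 GHz rule arrays from the rules packed after the header.
 */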
static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base *ab,
struct sk_buff *skb,
struct cur_regulatory_info *reg_info)
{
const void **tb;
const struct wmi_reg_chan_list_cc_event *chan_list_event_hdr;
struct wmi_regulatory_rule_struct *wmi_reg_rule;
u32 num_2ghz_reg_rules, num_5ghz_reg_rules;
int ret;
ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory channel list\n");
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
chan_list_event_hdr = tb[WMI_TAG_REG_CHAN_LIST_CC_EVENT];
if (!chan_list_event_hdr) {
ath11k_warn(ab, "failed to fetch reg chan list update ev\n");
kfree(tb);
return -EPROTO;
}
reg_info->num_2ghz_reg_rules = chan_list_event_hdr->num_2ghz_reg_rules;
reg_info->num_5ghz_reg_rules = chan_list_event_hdr->num_5ghz_reg_rules;
if (!(reg_info->num_2ghz_reg_rules + reg_info->num_5ghz_reg_rules)) {
ath11k_warn(ab, "No regulatory rules available in the event info\n");
kfree(tb);
return -EINVAL;
}
memcpy(reg_info->alpha2, &chan_list_event_hdr->alpha2,
REG_ALPHA2_LEN);
reg_info->dfs_region = chan_list_event_hdr->dfs_region;
reg_info->phybitmap = chan_list_event_hdr->phybitmap;
reg_info->num_phy = chan_list_event_hdr->num_phy;
reg_info->phy_id = chan_list_event_hdr->phy_id;
reg_info->ctry_code = chan_list_event_hdr->country_id;
reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code;
	reg_info->status_code =
		ath11k_wmi_cc_setting_code_to_reg(chan_list_event_hdr->status_code);
	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "status_code %s",
		   ath11k_cc_status_to_str(reg_info->status_code));
reg_info->is_ext_reg_event = false;
reg_info->min_bw_2ghz = chan_list_event_hdr->min_bw_2ghz;
reg_info->max_bw_2ghz = chan_list_event_hdr->max_bw_2ghz;
reg_info->min_bw_5ghz = chan_list_event_hdr->min_bw_5ghz;
reg_info->max_bw_5ghz = chan_list_event_hdr->max_bw_5ghz;
num_2ghz_reg_rules = reg_info->num_2ghz_reg_rules;
num_5ghz_reg_rules = reg_info->num_5ghz_reg_rules;
ath11k_dbg(ab, ATH11K_DBG_WMI,
"cc %s dsf %d BW: min_2ghz %d max_2ghz %d min_5ghz %d max_5ghz %d",
reg_info->alpha2, reg_info->dfs_region,
reg_info->min_bw_2ghz, reg_info->max_bw_2ghz,
reg_info->min_bw_5ghz, reg_info->max_bw_5ghz);
ath11k_dbg(ab, ATH11K_DBG_WMI,
"num_2ghz_reg_rules %d num_5ghz_reg_rules %d",
num_2ghz_reg_rules, num_5ghz_reg_rules);
wmi_reg_rule =
(struct wmi_regulatory_rule_struct *)((u8 *)chan_list_event_hdr
+ sizeof(*chan_list_event_hdr)
+ sizeof(struct wmi_tlv));
if (num_2ghz_reg_rules) {
reg_info->reg_rules_2ghz_ptr =
create_reg_rules_from_wmi(num_2ghz_reg_rules,
wmi_reg_rule);
if (!reg_info->reg_rules_2ghz_ptr) {
kfree(tb);
			ath11k_warn(ab, "Unable to allocate memory for 2 GHz rules\n");
return -ENOMEM;
}
ath11k_print_reg_rule(ab, "2 GHz",
num_2ghz_reg_rules,
reg_info->reg_rules_2ghz_ptr);
}
if (num_5ghz_reg_rules) {
wmi_reg_rule += num_2ghz_reg_rules;
reg_info->reg_rules_5ghz_ptr =
create_reg_rules_from_wmi(num_5ghz_reg_rules,
wmi_reg_rule);
if (!reg_info->reg_rules_5ghz_ptr) {
kfree(tb);
			ath11k_warn(ab, "Unable to allocate memory for 5 GHz rules\n");
return -ENOMEM;
}
ath11k_print_reg_rule(ab, "5 GHz",
num_5ghz_reg_rules,
reg_info->reg_rules_5ghz_ptr);
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "processed regulatory channel list\n");
kfree(tb);
return 0;
}
static struct cur_reg_rule
*create_ext_reg_rules_from_wmi(u32 num_reg_rules,
struct wmi_regulatory_ext_rule *wmi_reg_rule)
{
struct cur_reg_rule *reg_rule_ptr;
u32 count;
reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr), GFP_ATOMIC);
if (!reg_rule_ptr)
return NULL;
for (count = 0; count < num_reg_rules; count++) {
reg_rule_ptr[count].start_freq =
u32_get_bits(wmi_reg_rule[count].freq_info,
REG_RULE_START_FREQ);
reg_rule_ptr[count].end_freq =
u32_get_bits(wmi_reg_rule[count].freq_info,
REG_RULE_END_FREQ);
reg_rule_ptr[count].max_bw =
u32_get_bits(wmi_reg_rule[count].bw_pwr_info,
REG_RULE_MAX_BW);
reg_rule_ptr[count].reg_power =
u32_get_bits(wmi_reg_rule[count].bw_pwr_info,
REG_RULE_REG_PWR);
reg_rule_ptr[count].ant_gain =
u32_get_bits(wmi_reg_rule[count].bw_pwr_info,
REG_RULE_ANT_GAIN);
reg_rule_ptr[count].flags =
u32_get_bits(wmi_reg_rule[count].flag_info,
REG_RULE_FLAGS);
reg_rule_ptr[count].psd_flag =
u32_get_bits(wmi_reg_rule[count].psd_power_info,
REG_RULE_PSD_INFO);
reg_rule_ptr[count].psd_eirp =
u32_get_bits(wmi_reg_rule[count].psd_power_info,
REG_RULE_PSD_EIRP);
}
return reg_rule_ptr;
}
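/* Count 5 GHz rules whose start frequency actually lies in the 6 GHz
 * range; such rules duplicate entries of the separate 6 GHz rule lists
 * and are skipped by the caller (see the comment below on firmware
 * mixing 6 GHz rules into the 5 GHz list).
 */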
static u8
ath11k_invalid_5ghz_reg_ext_rules_from_wmi(u32 num_reg_rules,
const struct wmi_regulatory_ext_rule *rule)
{
u8 num_invalid_5ghz_rules = 0;
u32 count, start_freq;
for (count = 0; count < num_reg_rules; count++) {
start_freq = u32_get_bits(rule[count].freq_info,
REG_RULE_START_FREQ);
if (start_freq >= ATH11K_MIN_6G_FREQ)
num_invalid_5ghz_rules++;
}
return num_invalid_5ghz_rules;
}
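/* Parse the extended regulatory channel list event. In addition to the
 * 2 GHz and 5 GHz rules this carries 6 GHz rules per AP power type
 * (LPI, SP, VLP) and per client type, all packed back to back after the
 * event header in that order.
 */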
static int ath11k_pull_reg_chan_list_ext_update_ev(struct ath11k_base *ab,
struct sk_buff *skb,
struct cur_regulatory_info *reg_info)
{
const void **tb;
const struct wmi_reg_chan_list_cc_ext_event *ev;
struct wmi_regulatory_ext_rule *ext_wmi_reg_rule;
u32 num_2ghz_reg_rules, num_5ghz_reg_rules;
u32 num_6ghz_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
u32 num_6ghz_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
u32 total_reg_rules = 0;
int ret, i, j, num_invalid_5ghz_ext_rules = 0;
ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory ext channel list\n");
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch reg chan list ext update ev\n");
kfree(tb);
return -EPROTO;
}
reg_info->num_2ghz_reg_rules = ev->num_2ghz_reg_rules;
reg_info->num_5ghz_reg_rules = ev->num_5ghz_reg_rules;
reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP] =
ev->num_6ghz_reg_rules_ap_lpi;
reg_info->num_6ghz_rules_ap[WMI_REG_STANDARD_POWER_AP] =
ev->num_6ghz_reg_rules_ap_sp;
reg_info->num_6ghz_rules_ap[WMI_REG_VERY_LOW_POWER_AP] =
ev->num_6ghz_reg_rules_ap_vlp;
for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
reg_info->num_6ghz_rules_client[WMI_REG_INDOOR_AP][i] =
ev->num_6ghz_reg_rules_client_lpi[i];
reg_info->num_6ghz_rules_client[WMI_REG_STANDARD_POWER_AP][i] =
ev->num_6ghz_reg_rules_client_sp[i];
reg_info->num_6ghz_rules_client[WMI_REG_VERY_LOW_POWER_AP][i] =
ev->num_6ghz_reg_rules_client_vlp[i];
}
num_2ghz_reg_rules = reg_info->num_2ghz_reg_rules;
num_5ghz_reg_rules = reg_info->num_5ghz_reg_rules;
total_reg_rules += num_2ghz_reg_rules;
total_reg_rules += num_5ghz_reg_rules;
if ((num_2ghz_reg_rules > MAX_REG_RULES) ||
(num_5ghz_reg_rules > MAX_REG_RULES)) {
ath11k_warn(ab, "Num reg rules for 2.4 GHz/5 GHz exceeds max limit (num_2ghz_reg_rules: %d num_5ghz_reg_rules: %d max_rules: %d)\n",
num_2ghz_reg_rules, num_5ghz_reg_rules, MAX_REG_RULES);
kfree(tb);
return -EINVAL;
}
for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
num_6ghz_reg_rules_ap[i] = reg_info->num_6ghz_rules_ap[i];
if (num_6ghz_reg_rules_ap[i] > MAX_6GHZ_REG_RULES) {
ath11k_warn(ab, "Num 6 GHz reg rules for AP mode(%d) exceeds max limit (num_6ghz_reg_rules_ap: %d, max_rules: %d)\n",
i, num_6ghz_reg_rules_ap[i], MAX_6GHZ_REG_RULES);
kfree(tb);
return -EINVAL;
}
total_reg_rules += num_6ghz_reg_rules_ap[i];
}
for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
num_6ghz_client[WMI_REG_INDOOR_AP][i] =
reg_info->num_6ghz_rules_client[WMI_REG_INDOOR_AP][i];
total_reg_rules += num_6ghz_client[WMI_REG_INDOOR_AP][i];
num_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] =
reg_info->num_6ghz_rules_client[WMI_REG_STANDARD_POWER_AP][i];
total_reg_rules += num_6ghz_client[WMI_REG_STANDARD_POWER_AP][i];
num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] =
reg_info->num_6ghz_rules_client[WMI_REG_VERY_LOW_POWER_AP][i];
total_reg_rules += num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i];
if ((num_6ghz_client[WMI_REG_INDOOR_AP][i] > MAX_6GHZ_REG_RULES) ||
(num_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] >
MAX_6GHZ_REG_RULES) ||
(num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] >
MAX_6GHZ_REG_RULES)) {
ath11k_warn(ab,
"Num 6 GHz client reg rules exceeds max limit, for client(type: %d)\n",
i);
kfree(tb);
return -EINVAL;
}
}
if (!total_reg_rules) {
ath11k_warn(ab, "No reg rules available\n");
kfree(tb);
return -EINVAL;
}
memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
reg_info->dfs_region = ev->dfs_region;
reg_info->phybitmap = ev->phybitmap;
reg_info->num_phy = ev->num_phy;
reg_info->phy_id = ev->phy_id;
reg_info->ctry_code = ev->country_id;
reg_info->reg_dmn_pair = ev->domain_code;
	reg_info->status_code =
		ath11k_wmi_cc_setting_code_to_reg(ev->status_code);
	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "status_code %s",
		   ath11k_cc_status_to_str(reg_info->status_code));
reg_info->is_ext_reg_event = true;
reg_info->min_bw_2ghz = ev->min_bw_2ghz;
reg_info->max_bw_2ghz = ev->max_bw_2ghz;
reg_info->min_bw_5ghz = ev->min_bw_5ghz;
reg_info->max_bw_5ghz = ev->max_bw_5ghz;
reg_info->min_bw_6ghz_ap[WMI_REG_INDOOR_AP] =
ev->min_bw_6ghz_ap_lpi;
reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP] =
ev->max_bw_6ghz_ap_lpi;
reg_info->min_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP] =
ev->min_bw_6ghz_ap_sp;
reg_info->max_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP] =
ev->max_bw_6ghz_ap_sp;
reg_info->min_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP] =
ev->min_bw_6ghz_ap_vlp;
reg_info->max_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP] =
ev->max_bw_6ghz_ap_vlp;
ath11k_dbg(ab, ATH11K_DBG_WMI,
"6 GHz AP BW: LPI (%d - %d), SP (%d - %d), VLP (%d - %d)\n",
reg_info->min_bw_6ghz_ap[WMI_REG_INDOOR_AP],
reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP],
reg_info->min_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP],
reg_info->max_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP],
reg_info->min_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP],
reg_info->max_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP]);
for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
reg_info->min_bw_6ghz_client[WMI_REG_INDOOR_AP][i] =
ev->min_bw_6ghz_client_lpi[i];
reg_info->max_bw_6ghz_client[WMI_REG_INDOOR_AP][i] =
ev->max_bw_6ghz_client_lpi[i];
reg_info->min_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] =
ev->min_bw_6ghz_client_sp[i];
reg_info->max_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] =
ev->max_bw_6ghz_client_sp[i];
reg_info->min_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] =
ev->min_bw_6ghz_client_vlp[i];
reg_info->max_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] =
ev->max_bw_6ghz_client_vlp[i];
ath11k_dbg(ab, ATH11K_DBG_WMI,
"6 GHz %s BW: LPI (%d - %d), SP (%d - %d), VLP (%d - %d)\n",
ath11k_6ghz_client_type_to_str(i),
reg_info->min_bw_6ghz_client[WMI_REG_INDOOR_AP][i],
reg_info->max_bw_6ghz_client[WMI_REG_INDOOR_AP][i],
reg_info->min_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i],
reg_info->max_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i],
reg_info->min_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i],
reg_info->max_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i]);
}
ath11k_dbg(ab, ATH11K_DBG_WMI,
"cc_ext %s dsf %d BW: min_2ghz %d max_2ghz %d min_5ghz %d max_5ghz %d",
reg_info->alpha2, reg_info->dfs_region,
reg_info->min_bw_2ghz, reg_info->max_bw_2ghz,
reg_info->min_bw_5ghz, reg_info->max_bw_5ghz);
ath11k_dbg(ab, ATH11K_DBG_WMI,
"num_2ghz_reg_rules %d num_5ghz_reg_rules %d",
num_2ghz_reg_rules, num_5ghz_reg_rules);
ath11k_dbg(ab, ATH11K_DBG_WMI,
"num_6ghz_reg_rules_ap_lpi: %d num_6ghz_reg_rules_ap_sp: %d num_6ghz_reg_rules_ap_vlp: %d",
num_6ghz_reg_rules_ap[WMI_REG_INDOOR_AP],
num_6ghz_reg_rules_ap[WMI_REG_STANDARD_POWER_AP],
num_6ghz_reg_rules_ap[WMI_REG_VERY_LOW_POWER_AP]);
j = WMI_REG_DEFAULT_CLIENT;
ath11k_dbg(ab, ATH11K_DBG_WMI,
"6 GHz Regular client: num_6ghz_reg_rules_lpi: %d num_6ghz_reg_rules_sp: %d num_6ghz_reg_rules_vlp: %d",
num_6ghz_client[WMI_REG_INDOOR_AP][j],
num_6ghz_client[WMI_REG_STANDARD_POWER_AP][j],
num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][j]);
j = WMI_REG_SUBORDINATE_CLIENT;
ath11k_dbg(ab, ATH11K_DBG_WMI,
"6 GHz Subordinate client: num_6ghz_reg_rules_lpi: %d num_6ghz_reg_rules_sp: %d num_6ghz_reg_rules_vlp: %d",
num_6ghz_client[WMI_REG_INDOOR_AP][j],
num_6ghz_client[WMI_REG_STANDARD_POWER_AP][j],
num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][j]);
ext_wmi_reg_rule =
(struct wmi_regulatory_ext_rule *)((u8 *)ev + sizeof(*ev) +
sizeof(struct wmi_tlv));
if (num_2ghz_reg_rules) {
reg_info->reg_rules_2ghz_ptr =
create_ext_reg_rules_from_wmi(num_2ghz_reg_rules,
ext_wmi_reg_rule);
if (!reg_info->reg_rules_2ghz_ptr) {
kfree(tb);
			ath11k_warn(ab, "Unable to allocate memory for 2 GHz rules\n");
return -ENOMEM;
}
ath11k_print_reg_rule(ab, "2 GHz",
num_2ghz_reg_rules,
reg_info->reg_rules_2ghz_ptr);
}
ext_wmi_reg_rule += num_2ghz_reg_rules;
	/* Firmware might include 6 GHz reg rules in the 5 GHz rule list
	 * for a few countries along with the separate 6 GHz rules.
	 * Having the same 6 GHz reg rule in both the 5 GHz and 6 GHz rule
	 * lists makes the intersect check true, so the same rules would be
	 * shown multiple times in the iw command output.
	 * Hence, avoid parsing 6 GHz rules from the 5 GHz reg rule list.
	 */
num_invalid_5ghz_ext_rules =
ath11k_invalid_5ghz_reg_ext_rules_from_wmi(num_5ghz_reg_rules,
ext_wmi_reg_rule);
if (num_invalid_5ghz_ext_rules) {
ath11k_dbg(ab, ATH11K_DBG_WMI,
"CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules",
reg_info->alpha2, reg_info->num_5ghz_reg_rules,
num_invalid_5ghz_ext_rules);
num_5ghz_reg_rules = num_5ghz_reg_rules - num_invalid_5ghz_ext_rules;
reg_info->num_5ghz_reg_rules = num_5ghz_reg_rules;
}
if (num_5ghz_reg_rules) {
reg_info->reg_rules_5ghz_ptr =
create_ext_reg_rules_from_wmi(num_5ghz_reg_rules,
ext_wmi_reg_rule);
if (!reg_info->reg_rules_5ghz_ptr) {
kfree(tb);
			ath11k_warn(ab, "Unable to allocate memory for 5 GHz rules\n");
return -ENOMEM;
}
ath11k_print_reg_rule(ab, "5 GHz",
num_5ghz_reg_rules,
reg_info->reg_rules_5ghz_ptr);
}
	/* We have adjusted the number of 5 GHz reg rules above. However, the
	 * skipped rules still occupy space in ext_wmi_reg_rule, so they must
	 * be stepped over as well.
	 *
	 * NOTE: num_invalid_5ghz_ext_rules is 0 in all other cases.
	 */
ext_wmi_reg_rule += (num_5ghz_reg_rules + num_invalid_5ghz_ext_rules);
for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
reg_info->reg_rules_6ghz_ap_ptr[i] =
create_ext_reg_rules_from_wmi(num_6ghz_reg_rules_ap[i],
ext_wmi_reg_rule);
if (!reg_info->reg_rules_6ghz_ap_ptr[i]) {
kfree(tb);
			ath11k_warn(ab, "Unable to allocate memory for 6 GHz AP rules\n");
return -ENOMEM;
}
ath11k_print_reg_rule(ab, ath11k_6ghz_ap_type_to_str(i),
num_6ghz_reg_rules_ap[i],
reg_info->reg_rules_6ghz_ap_ptr[i]);
ext_wmi_reg_rule += num_6ghz_reg_rules_ap[i];
}
for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
ath11k_dbg(ab, ATH11K_DBG_WMI,
"6 GHz AP type %s", ath11k_6ghz_ap_type_to_str(j));
for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
reg_info->reg_rules_6ghz_client_ptr[j][i] =
create_ext_reg_rules_from_wmi(num_6ghz_client[j][i],
ext_wmi_reg_rule);
if (!reg_info->reg_rules_6ghz_client_ptr[j][i]) {
kfree(tb);
				ath11k_warn(ab, "Unable to allocate memory for 6 GHz client rules\n");
return -ENOMEM;
}
ath11k_print_reg_rule(ab,
ath11k_6ghz_client_type_to_str(i),
num_6ghz_client[j][i],
reg_info->reg_rules_6ghz_client_ptr[j][i]);
ext_wmi_reg_rule += num_6ghz_client[j][i];
}
}
reg_info->client_type = ev->client_type;
reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
reg_info->unspecified_ap_usable =
ev->unspecified_ap_usable;
reg_info->domain_code_6ghz_ap[WMI_REG_INDOOR_AP] =
ev->domain_code_6ghz_ap_lpi;
reg_info->domain_code_6ghz_ap[WMI_REG_STANDARD_POWER_AP] =
ev->domain_code_6ghz_ap_sp;
reg_info->domain_code_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP] =
ev->domain_code_6ghz_ap_vlp;
ath11k_dbg(ab, ATH11K_DBG_WMI,
"6 GHz reg info client type %s rnr_tpe_usable %d unspecified_ap_usable %d AP sub domain: lpi %s, sp %s, vlp %s\n",
ath11k_6ghz_client_type_to_str(reg_info->client_type),
reg_info->rnr_tpe_usable,
reg_info->unspecified_ap_usable,
ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_ap_lpi),
ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_ap_sp),
ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_ap_vlp));
for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
reg_info->domain_code_6ghz_client[WMI_REG_INDOOR_AP][i] =
ev->domain_code_6ghz_client_lpi[i];
reg_info->domain_code_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] =
ev->domain_code_6ghz_client_sp[i];
reg_info->domain_code_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] =
ev->domain_code_6ghz_client_vlp[i];
ath11k_dbg(ab, ATH11K_DBG_WMI,
"6 GHz client type %s client sub domain: lpi %s, sp %s, vlp %s\n",
ath11k_6ghz_client_type_to_str(i),
ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_client_lpi[i]),
ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_client_sp[i]),
ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_client_vlp[i])
);
}
reg_info->domain_code_6ghz_super_id = ev->domain_code_6ghz_super_id;
ath11k_dbg(ab, ATH11K_DBG_WMI,
"6 GHz client_type %s 6 GHz super domain %s",
ath11k_6ghz_client_type_to_str(reg_info->client_type),
ath11k_super_reg_6ghz_to_str(reg_info->domain_code_6ghz_super_id));
ath11k_dbg(ab, ATH11K_DBG_WMI, "processed regulatory ext channel list\n");
kfree(tb);
return 0;
}
static int ath11k_pull_peer_del_resp_ev(struct ath11k_base *ab, struct sk_buff *skb,
struct wmi_peer_delete_resp_event *peer_del_resp)
{
const void **tb;
const struct wmi_peer_delete_resp_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch peer delete resp ev");
kfree(tb);
return -EPROTO;
}
memset(peer_del_resp, 0, sizeof(*peer_del_resp));
peer_del_resp->vdev_id = ev->vdev_id;
ether_addr_copy(peer_del_resp->peer_macaddr.addr,
ev->peer_macaddr.addr);
kfree(tb);
return 0;
}
static int ath11k_pull_vdev_del_resp_ev(struct ath11k_base *ab,
struct sk_buff *skb,
u32 *vdev_id)
{
const void **tb;
const struct wmi_vdev_delete_resp_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch vdev delete resp ev");
kfree(tb);
return -EPROTO;
}
*vdev_id = ev->vdev_id;
kfree(tb);
return 0;
}
static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab, void *evt_buf,
u32 len, u32 *vdev_id,
u32 *tx_status)
{
const void **tb;
const struct wmi_bcn_tx_status_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch bcn tx status ev");
kfree(tb);
return -EPROTO;
}
*vdev_id = ev->vdev_id;
*tx_status = ev->tx_status;
kfree(tb);
return 0;
}
static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base *ab, struct sk_buff *skb,
u32 *vdev_id)
{
const void **tb;
const struct wmi_vdev_stopped_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch vdev stop ev");
kfree(tb);
return -EPROTO;
}
*vdev_id = ev->vdev_id;
kfree(tb);
return 0;
}
static int ath11k_wmi_tlv_mgmt_rx_parse(struct ath11k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_mgmt_rx_parse *parse = data;
switch (tag) {
case WMI_TAG_MGMT_RX_HDR:
parse->fixed = ptr;
break;
case WMI_TAG_ARRAY_BYTE:
if (!parse->frame_buf_done) {
parse->frame_buf = ptr;
parse->frame_buf_done = true;
}
break;
}
return 0;
}
static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base *ab,
struct sk_buff *skb,
struct mgmt_rx_event_params *hdr)
{
struct wmi_tlv_mgmt_rx_parse parse = { };
const struct wmi_mgmt_rx_hdr *ev;
const u8 *frame;
int ret;
ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
ath11k_wmi_tlv_mgmt_rx_parse,
&parse);
if (ret) {
ath11k_warn(ab, "failed to parse mgmt rx tlv %d\n",
ret);
return ret;
}
ev = parse.fixed;
frame = parse.frame_buf;
if (!ev || !frame) {
ath11k_warn(ab, "failed to fetch mgmt rx hdr");
return -EPROTO;
}
hdr->pdev_id = ev->pdev_id;
hdr->chan_freq = ev->chan_freq;
hdr->channel = ev->channel;
hdr->snr = ev->snr;
hdr->rate = ev->rate;
hdr->phy_mode = ev->phy_mode;
hdr->buf_len = ev->buf_len;
hdr->status = ev->status;
hdr->flags = ev->flags;
hdr->rssi = ev->rssi;
hdr->tsf_delta = ev->tsf_delta;
memcpy(hdr->rssi_ctl, ev->rssi_ctl, sizeof(hdr->rssi_ctl));
if (skb->len < (frame - skb->data) + hdr->buf_len) {
ath11k_warn(ab, "invalid length in mgmt rx hdr ev");
return -EPROTO;
}
/* shift the sk_buff to point to `frame` */
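	/* skb_trim() drops the whole payload, the skb_put()/skb_pull() pair
	 * advances skb->data to the start of the frame, and the final
	 * skb_put() restores the frame length reported by firmware.
	 */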
skb_trim(skb, 0);
skb_put(skb, frame - skb->data);
skb_pull(skb, frame - skb->data);
skb_put(skb, hdr->buf_len);
ath11k_ce_byte_swap(skb->data, hdr->buf_len);
return 0;
}
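/* Complete a management frame TX: look the frame up by descriptor id,
 * unmap it, report the ACK status (and ack RSSI when the firmware
 * supports it) to mac80211, and wake any waiter once no management TX
 * remains pending.
 */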
static int wmi_process_mgmt_tx_comp(struct ath11k *ar,
struct wmi_mgmt_tx_compl_event *tx_compl_param)
{
struct sk_buff *msdu;
struct ieee80211_tx_info *info;
struct ath11k_skb_cb *skb_cb;
int num_mgmt;
spin_lock_bh(&ar->txmgmt_idr_lock);
msdu = idr_find(&ar->txmgmt_idr, tx_compl_param->desc_id);
if (!msdu) {
ath11k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
tx_compl_param->desc_id);
spin_unlock_bh(&ar->txmgmt_idr_lock);
return -ENOENT;
}
idr_remove(&ar->txmgmt_idr, tx_compl_param->desc_id);
spin_unlock_bh(&ar->txmgmt_idr_lock);
skb_cb = ATH11K_SKB_CB(msdu);
dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(msdu);
if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) &&
!tx_compl_param->status) {
info->flags |= IEEE80211_TX_STAT_ACK;
if (test_bit(WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI,
ar->ab->wmi_ab.svc_map))
info->status.ack_signal = tx_compl_param->ack_rssi;
}
ieee80211_tx_status_irqsafe(ar->hw, msdu);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
/* WARN when we received this event without doing any mgmt tx */
if (num_mgmt < 0)
WARN_ON_ONCE(1);
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"mgmt tx comp pending %d desc id %d\n",
num_mgmt, tx_compl_param->desc_id);
if (!num_mgmt)
wake_up(&ar->txmgmt_empty_waitq);
return 0;
}
static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base *ab,
struct sk_buff *skb,
struct wmi_mgmt_tx_compl_event *param)
{
const void **tb;
const struct wmi_mgmt_tx_compl_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch mgmt tx compl ev");
kfree(tb);
return -EPROTO;
}
param->pdev_id = ev->pdev_id;
param->desc_id = ev->desc_id;
param->status = ev->status;
param->ack_rssi = ev->ack_rssi;
kfree(tb);
return 0;
}
static void ath11k_wmi_event_scan_started(struct ath11k *ar)
{
lockdep_assert_held(&ar->data_lock);
switch (ar->scan.state) {
case ATH11K_SCAN_IDLE:
case ATH11K_SCAN_RUNNING:
case ATH11K_SCAN_ABORTING:
ath11k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
ath11k_scan_state_str(ar->scan.state),
ar->scan.state);
break;
case ATH11K_SCAN_STARTING:
ar->scan.state = ATH11K_SCAN_RUNNING;
if (ar->scan.is_roc)
ieee80211_ready_on_channel(ar->hw);
complete(&ar->scan.started);
break;
}
}
static void ath11k_wmi_event_scan_start_failed(struct ath11k *ar)
{
lockdep_assert_held(&ar->data_lock);
switch (ar->scan.state) {
case ATH11K_SCAN_IDLE:
case ATH11K_SCAN_RUNNING:
case ATH11K_SCAN_ABORTING:
ath11k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
ath11k_scan_state_str(ar->scan.state),
ar->scan.state);
break;
case ATH11K_SCAN_STARTING:
complete(&ar->scan.started);
__ath11k_mac_scan_finish(ar);
break;
}
}
static void ath11k_wmi_event_scan_completed(struct ath11k *ar)
{
lockdep_assert_held(&ar->data_lock);
switch (ar->scan.state) {
case ATH11K_SCAN_IDLE:
case ATH11K_SCAN_STARTING:
/* One suspected reason scan can be completed while starting is
* if firmware fails to deliver all scan events to the host,
* e.g. when transport pipe is full. This has been observed
* with spectral scan phyerr events starving wmi transport
* pipe. In such case the "scan completed" event should be (and
* is) ignored by the host as it may be just firmware's scan
* state machine recovering.
*/
ath11k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
ath11k_scan_state_str(ar->scan.state),
ar->scan.state);
break;
case ATH11K_SCAN_RUNNING:
case ATH11K_SCAN_ABORTING:
__ath11k_mac_scan_finish(ar);
break;
}
}
static void ath11k_wmi_event_scan_bss_chan(struct ath11k *ar)
{
lockdep_assert_held(&ar->data_lock);
switch (ar->scan.state) {
case ATH11K_SCAN_IDLE:
case ATH11K_SCAN_STARTING:
ath11k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
ath11k_scan_state_str(ar->scan.state),
ar->scan.state);
break;
case ATH11K_SCAN_RUNNING:
case ATH11K_SCAN_ABORTING:
ar->scan_channel = NULL;
break;
}
}
static void ath11k_wmi_event_scan_foreign_chan(struct ath11k *ar, u32 freq)
{
lockdep_assert_held(&ar->data_lock);
switch (ar->scan.state) {
case ATH11K_SCAN_IDLE:
case ATH11K_SCAN_STARTING:
ath11k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
ath11k_scan_state_str(ar->scan.state),
ar->scan.state);
break;
case ATH11K_SCAN_RUNNING:
case ATH11K_SCAN_ABORTING:
ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
if (ar->scan.is_roc && ar->scan.roc_freq == freq)
complete(&ar->scan.on_channel);
break;
}
}
static const char *
ath11k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
enum wmi_scan_completion_reason reason)
{
switch (type) {
case WMI_SCAN_EVENT_STARTED:
return "started";
case WMI_SCAN_EVENT_COMPLETED:
switch (reason) {
case WMI_SCAN_REASON_COMPLETED:
return "completed";
case WMI_SCAN_REASON_CANCELLED:
return "completed [cancelled]";
case WMI_SCAN_REASON_PREEMPTED:
return "completed [preempted]";
case WMI_SCAN_REASON_TIMEDOUT:
return "completed [timedout]";
case WMI_SCAN_REASON_INTERNAL_FAILURE:
return "completed [internal err]";
case WMI_SCAN_REASON_MAX:
break;
}
return "completed [unknown]";
case WMI_SCAN_EVENT_BSS_CHANNEL:
return "bss channel";
case WMI_SCAN_EVENT_FOREIGN_CHAN:
return "foreign channel";
case WMI_SCAN_EVENT_DEQUEUED:
return "dequeued";
case WMI_SCAN_EVENT_PREEMPTED:
return "preempted";
case WMI_SCAN_EVENT_START_FAILED:
return "start failed";
case WMI_SCAN_EVENT_RESTARTED:
return "restarted";
case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
return "foreign channel exit";
default:
return "unknown";
}
}
static int ath11k_pull_scan_ev(struct ath11k_base *ab, struct sk_buff *skb,
struct wmi_scan_event *scan_evt_param)
{
const void **tb;
const struct wmi_scan_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_SCAN_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch scan ev");
kfree(tb);
return -EPROTO;
}
scan_evt_param->event_type = ev->event_type;
scan_evt_param->reason = ev->reason;
scan_evt_param->channel_freq = ev->channel_freq;
scan_evt_param->scan_req_id = ev->scan_req_id;
scan_evt_param->scan_id = ev->scan_id;
scan_evt_param->vdev_id = ev->vdev_id;
scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
kfree(tb);
return 0;
}
static int ath11k_pull_peer_sta_kickout_ev(struct ath11k_base *ab, struct sk_buff *skb,
struct wmi_peer_sta_kickout_arg *arg)
{
const void **tb;
const struct wmi_peer_sta_kickout_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch peer sta kickout ev");
kfree(tb);
return -EPROTO;
}
arg->mac_addr = ev->peer_macaddr.addr;
kfree(tb);
return 0;
}
static int ath11k_pull_roam_ev(struct ath11k_base *ab, struct sk_buff *skb,
struct wmi_roam_event *roam_ev)
{
const void **tb;
const struct wmi_roam_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_ROAM_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch roam ev");
kfree(tb);
return -EPROTO;
}
roam_ev->vdev_id = ev->vdev_id;
roam_ev->reason = ev->reason;
roam_ev->rssi = ev->rssi;
kfree(tb);
return 0;
}
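/* Map a center frequency to a flat channel index across all supported
 * bands. When no channel matches, the index one past the last channel
 * is returned, so callers are expected to range-check the result.
 */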
static int freq_to_idx(struct ath11k *ar, int freq)
{
struct ieee80211_supported_band *sband;
int band, ch, idx = 0;
for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
sband = ar->hw->wiphy->bands[band];
if (!sband)
continue;
for (ch = 0; ch < sband->n_channels; ch++, idx++)
if (sband->channels[ch].center_freq == freq)
goto exit;
}
exit:
return idx;
}
static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, u8 *evt_buf,
u32 len, struct wmi_chan_info_event *ch_info_ev)
{
const void **tb;
const struct wmi_chan_info_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_CHAN_INFO_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch chan info ev");
kfree(tb);
return -EPROTO;
}
ch_info_ev->err_code = ev->err_code;
ch_info_ev->freq = ev->freq;
ch_info_ev->cmd_flags = ev->cmd_flags;
ch_info_ev->noise_floor = ev->noise_floor;
ch_info_ev->rx_clear_count = ev->rx_clear_count;
ch_info_ev->cycle_count = ev->cycle_count;
ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
ch_info_ev->rx_frame_count = ev->rx_frame_count;
ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
ch_info_ev->vdev_id = ev->vdev_id;
kfree(tb);
return 0;
}
static int
ath11k_pull_pdev_bss_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb,
struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
{
const void **tb;
const struct wmi_pdev_bss_chan_info_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch pdev bss chan info ev");
kfree(tb);
return -EPROTO;
}
bss_ch_info_ev->pdev_id = ev->pdev_id;
bss_ch_info_ev->freq = ev->freq;
bss_ch_info_ev->noise_floor = ev->noise_floor;
bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
kfree(tb);
return 0;
}
static int
ath11k_pull_vdev_install_key_compl_ev(struct ath11k_base *ab, struct sk_buff *skb,
struct wmi_vdev_install_key_complete_arg *arg)
{
const void **tb;
const struct wmi_vdev_install_key_compl_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch vdev install key compl ev");
kfree(tb);
return -EPROTO;
}
arg->vdev_id = ev->vdev_id;
arg->macaddr = ev->peer_macaddr.addr;
arg->key_idx = ev->key_idx;
arg->key_flags = ev->key_flags;
arg->status = ev->status;
kfree(tb);
return 0;
}
static int ath11k_pull_peer_assoc_conf_ev(struct ath11k_base *ab, struct sk_buff *skb,
struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
{
const void **tb;
const struct wmi_peer_assoc_conf_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch peer assoc conf ev");
kfree(tb);
return -EPROTO;
}
peer_assoc_conf->vdev_id = ev->vdev_id;
peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
kfree(tb);
return 0;
}
static void ath11k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
struct ath11k_fw_stats_pdev *dst)
{
dst->ch_noise_floor = src->chan_nf;
dst->tx_frame_count = src->tx_frame_count;
dst->rx_frame_count = src->rx_frame_count;
dst->rx_clear_count = src->rx_clear_count;
dst->cycle_count = src->cycle_count;
dst->phy_err_count = src->phy_err_count;
dst->chan_tx_power = src->chan_tx_pwr;
}
static void
ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
struct ath11k_fw_stats_pdev *dst)
{
dst->comp_queued = src->comp_queued;
dst->comp_delivered = src->comp_delivered;
dst->msdu_enqued = src->msdu_enqued;
dst->mpdu_enqued = src->mpdu_enqued;
dst->wmm_drop = src->wmm_drop;
dst->local_enqued = src->local_enqued;
dst->local_freed = src->local_freed;
dst->hw_queued = src->hw_queued;
dst->hw_reaped = src->hw_reaped;
dst->underrun = src->underrun;
dst->hw_paused = src->hw_paused;
dst->tx_abort = src->tx_abort;
dst->mpdus_requeued = src->mpdus_requeued;
dst->tx_ko = src->tx_ko;
dst->tx_xretry = src->tx_xretry;
dst->data_rc = src->data_rc;
dst->self_triggers = src->self_triggers;
dst->sw_retry_failure = src->sw_retry_failure;
dst->illgl_rate_phy_err = src->illgl_rate_phy_err;
dst->pdev_cont_xretry = src->pdev_cont_xretry;
dst->pdev_tx_timeout = src->pdev_tx_timeout;
dst->pdev_resets = src->pdev_resets;
dst->stateless_tid_alloc_failure = src->stateless_tid_alloc_failure;
dst->phy_underrun = src->phy_underrun;
dst->txop_ovf = src->txop_ovf;
dst->seq_posted = src->seq_posted;
dst->seq_failed_queueing = src->seq_failed_queueing;
dst->seq_completed = src->seq_completed;
dst->seq_restarted = src->seq_restarted;
dst->mu_seq_posted = src->mu_seq_posted;
dst->mpdus_sw_flush = src->mpdus_sw_flush;
dst->mpdus_hw_filter = src->mpdus_hw_filter;
dst->mpdus_truncated = src->mpdus_truncated;
dst->mpdus_ack_failed = src->mpdus_ack_failed;
dst->mpdus_expired = src->mpdus_expired;
}
static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
struct ath11k_fw_stats_pdev *dst)
{
dst->mid_ppdu_route_change = src->mid_ppdu_route_change;
dst->status_rcvd = src->status_rcvd;
dst->r0_frags = src->r0_frags;
dst->r1_frags = src->r1_frags;
dst->r2_frags = src->r2_frags;
dst->r3_frags = src->r3_frags;
dst->htt_msdus = src->htt_msdus;
dst->htt_mpdus = src->htt_mpdus;
dst->loc_msdus = src->loc_msdus;
dst->loc_mpdus = src->loc_mpdus;
dst->oversize_amsdu = src->oversize_amsdu;
dst->phy_errs = src->phy_errs;
dst->phy_err_drop = src->phy_err_drop;
dst->mpdu_errs = src->mpdu_errs;
dst->rx_ovfl_errs = src->rx_ovfl_errs;
}
static void
ath11k_wmi_pull_vdev_stats(const struct wmi_vdev_stats *src,
struct ath11k_fw_stats_vdev *dst)
{
int i;
dst->vdev_id = src->vdev_id;
dst->beacon_snr = src->beacon_snr;
dst->data_snr = src->data_snr;
dst->num_rx_frames = src->num_rx_frames;
dst->num_rts_fail = src->num_rts_fail;
dst->num_rts_success = src->num_rts_success;
dst->num_rx_err = src->num_rx_err;
dst->num_rx_discard = src->num_rx_discard;
dst->num_tx_not_acked = src->num_tx_not_acked;
for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
dst->num_tx_frames[i] = src->num_tx_frames[i];
for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
dst->num_tx_frames_retries[i] = src->num_tx_frames_retries[i];
for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
dst->num_tx_frames_failures[i] = src->num_tx_frames_failures[i];
for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
dst->tx_rate_history[i] = src->tx_rate_history[i];
for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
dst->beacon_rssi_history[i] = src->beacon_rssi_history[i];
}
static void
ath11k_wmi_pull_bcn_stats(const struct wmi_bcn_stats *src,
struct ath11k_fw_stats_bcn *dst)
{
dst->vdev_id = src->vdev_id;
dst->tx_bcn_succ_cnt = src->tx_bcn_succ_cnt;
dst->tx_bcn_outage_cnt = src->tx_bcn_outage_cnt;
}
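/* Per-chain RSSI stats TLV handler: resolve the vdev to its vif, find
 * the associated station by BSSID and copy the averaged per-chain
 * beacon RSSI values into the station's chain_signal array.
 */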
static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_fw_stats_parse *parse = data;
const struct wmi_stats_event *ev = parse->ev;
struct ath11k_fw_stats *stats = parse->stats;
struct ath11k *ar;
struct ath11k_vif *arvif;
struct ieee80211_sta *sta;
struct ath11k_sta *arsta;
const struct wmi_rssi_stats *stats_rssi = (const struct wmi_rssi_stats *)ptr;
int j, ret = 0;
if (tag != WMI_TAG_RSSI_STATS)
return -EPROTO;
	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
	if (!ar) {
		/* Defensive check (an assumption, not in the original flow):
		 * a pdev_id that matches no pdev would otherwise lead to a
		 * NULL dereference in the arvif lookup below.
		 */
		ret = -EPROTO;
		goto exit;
	}
	stats->stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT;
ath11k_dbg(ab, ATH11K_DBG_WMI,
"stats vdev id %d mac %pM\n",
stats_rssi->vdev_id, stats_rssi->peer_macaddr.addr);
arvif = ath11k_mac_get_arvif(ar, stats_rssi->vdev_id);
if (!arvif) {
		ath11k_warn(ab, "no vif found for vdev id %d\n",
			    stats_rssi->vdev_id);
ret = -EPROTO;
goto exit;
}
ath11k_dbg(ab, ATH11K_DBG_WMI,
"stats bssid %pM vif %p\n",
arvif->bssid, arvif->vif);
sta = ieee80211_find_sta_by_ifaddr(ar->hw,
arvif->bssid,
NULL);
if (!sta) {
		ath11k_dbg(ab, ATH11K_DBG_WMI,
			   "no station found for bssid %pM for rssi chain\n",
			   arvif->bssid);
goto exit;
}
arsta = (struct ath11k_sta *)sta->drv_priv;
BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
ARRAY_SIZE(stats_rssi->rssi_avg_beacon));
for (j = 0; j < ARRAY_SIZE(arsta->chain_signal); j++) {
arsta->chain_signal[j] = stats_rssi->rssi_avg_beacon[j];
ath11k_dbg(ab, ATH11K_DBG_WMI,
"stats beacon rssi[%d] %d data rssi[%d] %d\n",
j,
stats_rssi->rssi_avg_beacon[j],
j,
stats_rssi->rssi_avg_data[j]);
}
exit:
rcu_read_unlock();
return ret;
}
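/* The stats payload is a raw byte blob holding num_pdev_stats pdev
 * records, then num_vdev_stats vdev records, then num_bcn_stats beacon
 * records back to back; each record is length-checked, copied into a
 * freshly allocated entry and linked into the matching stats list.
 */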
static int ath11k_wmi_tlv_fw_stats_data_parse(struct ath11k_base *ab,
struct wmi_tlv_fw_stats_parse *parse,
const void *ptr,
u16 len)
{
struct ath11k_fw_stats *stats = parse->stats;
const struct wmi_stats_event *ev = parse->ev;
struct ath11k *ar;
struct ath11k_vif *arvif;
struct ieee80211_sta *sta;
struct ath11k_sta *arsta;
int i, ret = 0;
const void *data = ptr;
if (!ev) {
ath11k_warn(ab, "failed to fetch update stats ev");
return -EPROTO;
}
stats->stats_id = 0;
rcu_read_lock();
ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
for (i = 0; i < ev->num_pdev_stats; i++) {
const struct wmi_pdev_stats *src;
struct ath11k_fw_stats_pdev *dst;
src = data;
if (len < sizeof(*src)) {
ret = -EPROTO;
goto exit;
}
stats->stats_id = WMI_REQUEST_PDEV_STAT;
data += sizeof(*src);
len -= sizeof(*src);
dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
if (!dst)
continue;
ath11k_wmi_pull_pdev_stats_base(&src->base, dst);
ath11k_wmi_pull_pdev_stats_tx(&src->tx, dst);
ath11k_wmi_pull_pdev_stats_rx(&src->rx, dst);
list_add_tail(&dst->list, &stats->pdevs);
}
for (i = 0; i < ev->num_vdev_stats; i++) {
const struct wmi_vdev_stats *src;
struct ath11k_fw_stats_vdev *dst;
src = data;
if (len < sizeof(*src)) {
ret = -EPROTO;
goto exit;
}
stats->stats_id = WMI_REQUEST_VDEV_STAT;
		/* ar may be NULL if the pdev_id is unknown (defensive
		 * assumption, not in the original); skip the per-station
		 * RSSI update in that case.
		 */
		arvif = ar ? ath11k_mac_get_arvif(ar, src->vdev_id) : NULL;
if (arvif) {
sta = ieee80211_find_sta_by_ifaddr(ar->hw,
arvif->bssid,
NULL);
if (sta) {
arsta = (struct ath11k_sta *)sta->drv_priv;
arsta->rssi_beacon = src->beacon_snr;
ath11k_dbg(ab, ATH11K_DBG_WMI,
"stats vdev id %d snr %d\n",
src->vdev_id, src->beacon_snr);
} else {
				ath11k_dbg(ab, ATH11K_DBG_WMI,
					   "no station found for bssid %pM for vdev stat\n",
					   arvif->bssid);
}
}
data += sizeof(*src);
len -= sizeof(*src);
dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
if (!dst)
continue;
ath11k_wmi_pull_vdev_stats(src, dst);
list_add_tail(&dst->list, &stats->vdevs);
}
for (i = 0; i < ev->num_bcn_stats; i++) {
const struct wmi_bcn_stats *src;
struct ath11k_fw_stats_bcn *dst;
src = data;
if (len < sizeof(*src)) {
ret = -EPROTO;
goto exit;
}
stats->stats_id = WMI_REQUEST_BCN_STAT;
data += sizeof(*src);
len -= sizeof(*src);
dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
if (!dst)
continue;
ath11k_wmi_pull_bcn_stats(src, dst);
list_add_tail(&dst->list, &stats->bcn);
}
exit:
rcu_read_unlock();
return ret;
}
static int ath11k_wmi_tlv_fw_stats_parse(struct ath11k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_fw_stats_parse *parse = data;
int ret = 0;
switch (tag) {
case WMI_TAG_STATS_EVENT:
parse->ev = (struct wmi_stats_event *)ptr;
parse->stats->pdev_id = parse->ev->pdev_id;
break;
case WMI_TAG_ARRAY_BYTE:
ret = ath11k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len);
break;
case WMI_TAG_PER_CHAIN_RSSI_STATS:
parse->rssi = (struct wmi_per_chain_rssi_stats *)ptr;
if (parse->ev->stats_id & WMI_REQUEST_RSSI_PER_CHAIN_STAT)
parse->rssi_num = parse->rssi->num_per_chain_rssi_stats;
ath11k_dbg(ab, ATH11K_DBG_WMI,
"stats id 0x%x num chain %d\n",
parse->ev->stats_id,
parse->rssi_num);
break;
case WMI_TAG_ARRAY_STRUCT:
if (parse->rssi_num && !parse->chain_rssi_done) {
ret = ath11k_wmi_tlv_iter(ab, ptr, len,
ath11k_wmi_tlv_rssi_chain_parse,
parse);
if (ret) {
ath11k_warn(ab, "failed to parse rssi chain %d\n",
ret);
return ret;
}
parse->chain_rssi_done = true;
}
break;
default:
break;
}
return ret;
}
int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
struct ath11k_fw_stats *stats)
{
struct wmi_tlv_fw_stats_parse parse = { };
stats->stats_id = 0;
parse.stats = stats;
return ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
ath11k_wmi_tlv_fw_stats_parse,
&parse);
}
static void
ath11k_wmi_fw_pdev_base_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
char *buf, u32 *length)
{
u32 len = *length;
u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
"ath11k PDEV stats");
len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
"=================");
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Channel noise floor", pdev->ch_noise_floor);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Channel TX power", pdev->chan_tx_power);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"TX frame count", pdev->tx_frame_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"RX frame count", pdev->rx_frame_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"RX clear count", pdev->rx_clear_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Cycle count", pdev->cycle_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"PHY error count", pdev->phy_err_count);
*length = len;
}
static void
ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
char *buf, u32 *length)
{
u32 len = *length;
u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
"ath11k PDEV TX stats");
len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
"====================");
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"HTT cookies queued", pdev->comp_queued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"HTT cookies disp.", pdev->comp_delivered);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MSDU queued", pdev->msdu_enqued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDU queued", pdev->mpdu_enqued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MSDUs dropped", pdev->wmm_drop);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Local enqued", pdev->local_enqued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Local freed", pdev->local_freed);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"HW queued", pdev->hw_queued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PPDUs reaped", pdev->hw_reaped);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Num underruns", pdev->underrun);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Num HW Paused", pdev->hw_paused);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PPDUs cleaned", pdev->tx_abort);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDUs requeued", pdev->mpdus_requeued);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"PPDU OK", pdev->tx_ko);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Excessive retries", pdev->tx_xretry);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"HW rate", pdev->data_rc);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Sched self triggers", pdev->self_triggers);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Dropped due to SW retries",
pdev->sw_retry_failure);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Illegal rate phy errors",
pdev->illgl_rate_phy_err);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"PDEV continuous xretry", pdev->pdev_cont_xretry);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"TX timeout", pdev->pdev_tx_timeout);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"PDEV resets", pdev->pdev_resets);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Stateless TIDs alloc failures",
pdev->stateless_tid_alloc_failure);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"PHY underrun", pdev->phy_underrun);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"MPDU is more than txop limit", pdev->txop_ovf);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Num sequences posted", pdev->seq_posted);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Num seq failed queueing ", pdev->seq_failed_queueing);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Num sequences completed ", pdev->seq_completed);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Num sequences restarted ", pdev->seq_restarted);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Num of MU sequences posted ", pdev->mu_seq_posted);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Num of MPDUS SW flushed ", pdev->mpdus_sw_flush);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Num of MPDUS HW filtered ", pdev->mpdus_hw_filter);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Num of MPDUS truncated ", pdev->mpdus_truncated);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Num of MPDUS ACK failed ", pdev->mpdus_ack_failed);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Num of MPDUS expired ", pdev->mpdus_expired);
*length = len;
}
static void
ath11k_wmi_fw_pdev_rx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
char *buf, u32 *length)
{
u32 len = *length;
u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
"ath11k PDEV RX stats");
len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
"====================");
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Mid PPDU route change",
pdev->mid_ppdu_route_change);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Tot. number of statuses", pdev->status_rcvd);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Extra frags on rings 0", pdev->r0_frags);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Extra frags on rings 1", pdev->r1_frags);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Extra frags on rings 2", pdev->r2_frags);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Extra frags on rings 3", pdev->r3_frags);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MSDUs delivered to HTT", pdev->htt_msdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDUs delivered to HTT", pdev->htt_mpdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MSDUs delivered to stack", pdev->loc_msdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDUs delivered to stack", pdev->loc_mpdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Oversized AMSDUs", pdev->oversize_amsdu);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PHY errors", pdev->phy_errs);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PHY errors drops", pdev->phy_err_drop);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Overflow errors", pdev->rx_ovfl_errs);
*length = len;
}
static void
ath11k_wmi_fw_vdev_stats_fill(struct ath11k *ar,
const struct ath11k_fw_stats_vdev *vdev,
char *buf, u32 *length)
{
u32 len = *length;
u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev->vdev_id);
u8 *vif_macaddr;
int i;
/* VDEV stats has all the active VDEVs of other PDEVs as well,
* ignoring those not part of requested PDEV
*/
if (!arvif)
return;
vif_macaddr = arvif->vif->addr;
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"VDEV ID", vdev->vdev_id);
len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
"VDEV MAC address", vif_macaddr);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"beacon snr", vdev->beacon_snr);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"data snr", vdev->data_snr);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"num rx frames", vdev->num_rx_frames);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"num rts fail", vdev->num_rts_fail);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"num rts success", vdev->num_rts_success);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"num rx err", vdev->num_rx_err);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"num rx discard", vdev->num_rx_discard);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"num tx not acked", vdev->num_tx_not_acked);
for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
len += scnprintf(buf + len, buf_len - len,
"%25s [%02d] %u\n",
"num tx frames", i,
vdev->num_tx_frames[i]);
for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
len += scnprintf(buf + len, buf_len - len,
"%25s [%02d] %u\n",
"num tx frames retries", i,
vdev->num_tx_frames_retries[i]);
for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
len += scnprintf(buf + len, buf_len - len,
"%25s [%02d] %u\n",
"num tx frames failures", i,
vdev->num_tx_frames_failures[i]);
for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
len += scnprintf(buf + len, buf_len - len,
"%25s [%02d] 0x%08x\n",
"tx rate history", i,
vdev->tx_rate_history[i]);
for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
len += scnprintf(buf + len, buf_len - len,
"%25s [%02d] %u\n",
"beacon rssi history", i,
vdev->beacon_rssi_history[i]);
len += scnprintf(buf + len, buf_len - len, "\n");
*length = len;
}
static void
ath11k_wmi_fw_bcn_stats_fill(struct ath11k *ar,
const struct ath11k_fw_stats_bcn *bcn,
char *buf, u32 *length)
{
u32 len = *length;
u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, bcn->vdev_id);
u8 *vdev_macaddr;
if (!arvif) {
ath11k_warn(ar->ab, "invalid vdev id %d in bcn stats",
bcn->vdev_id);
return;
}
vdev_macaddr = arvif->vif->addr;
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"VDEV ID", bcn->vdev_id);
len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
"VDEV MAC address", vdev_macaddr);
len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
"================");
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"Num of beacon tx success", bcn->tx_bcn_succ_cnt);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"Num of beacon tx failures", bcn->tx_bcn_outage_cnt);
len += scnprintf(buf + len, buf_len - len, "\n");
*length = len;
}
void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
struct ath11k_fw_stats *fw_stats,
u32 stats_id, char *buf)
{
u32 len = 0;
u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
const struct ath11k_fw_stats_pdev *pdev;
const struct ath11k_fw_stats_vdev *vdev;
const struct ath11k_fw_stats_bcn *bcn;
size_t num_bcn;
spin_lock_bh(&ar->data_lock);
if (stats_id == WMI_REQUEST_PDEV_STAT) {
pdev = list_first_entry_or_null(&fw_stats->pdevs,
struct ath11k_fw_stats_pdev, list);
if (!pdev) {
ath11k_warn(ar->ab, "failed to get pdev stats\n");
goto unlock;
}
ath11k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
ath11k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
ath11k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
}
if (stats_id == WMI_REQUEST_VDEV_STAT) {
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
"ath11k VDEV stats");
len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
"=================");
list_for_each_entry(vdev, &fw_stats->vdevs, list)
ath11k_wmi_fw_vdev_stats_fill(ar, vdev, buf, &len);
}
if (stats_id == WMI_REQUEST_BCN_STAT) {
num_bcn = list_count_nodes(&fw_stats->bcn);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
"ath11k Beacon stats", num_bcn);
len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
"===================");
list_for_each_entry(bcn, &fw_stats->bcn, list)
ath11k_wmi_fw_bcn_stats_fill(ar, bcn, buf, &len);
}
unlock:
spin_unlock_bh(&ar->data_lock);
if (len >= buf_len)
buf[len - 1] = 0;
else
buf[len] = 0;
}
static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
{
/* try to send pending beacons first. they take priority */
wake_up(&ab->wmi_ab.tx_credits_wq);
}
static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb)
{
const struct wmi_11d_new_cc_ev *ev;
struct ath11k *ar;
struct ath11k_pdev *pdev;
const void **tb;
int ret, i;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT];
if (!ev) {
kfree(tb);
ath11k_warn(ab, "failed to fetch 11d new cc ev");
return -EPROTO;
}
spin_lock_bh(&ab->base_lock);
memcpy(&ab->new_alpha2, &ev->new_alpha2, 2);
spin_unlock_bh(&ab->base_lock);
ath11k_dbg(ab, ATH11K_DBG_WMI, "event 11d new cc %c%c\n",
ab->new_alpha2[0],
ab->new_alpha2[1]);
kfree(tb);
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
ar->state_11d = ATH11K_11D_IDLE;
complete(&ar->completed_11d_scan);
}
queue_work(ab->workqueue, &ab->update_11d_work);
return 0;
}
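/* On HTC TX completion, free the skb and wake up any sender waiting for a
* free CE descriptor on the WMI endpoint the skb was queued on.
*/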
static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
struct sk_buff *skb)
{
struct ath11k_pdev_wmi *wmi = NULL;
u32 i;
u8 wmi_ep_count;
u8 eid;
eid = ATH11K_SKB_CB(skb)->eid;
dev_kfree_skb(skb);
if (eid >= ATH11K_HTC_EP_COUNT)
return;
wmi_ep_count = ab->htc.wmi_ep_count;
if (wmi_ep_count > ab->hw_params.max_radios)
return;
for (i = 0; i < ab->htc.wmi_ep_count; i++) {
if (ab->wmi_ab.wmi[i].eid == eid) {
wmi = &ab->wmi_ab.wmi[i];
break;
}
}
if (wmi)
wake_up(&wmi->tx_ce_desc_wq);
}
static bool ath11k_reg_is_world_alpha(char *alpha)
{
if (alpha[0] == '0' && alpha[1] == '0')
return true;
if (alpha[0] == 'n' && alpha[1] == 'a')
return true;
return false;
}
static int ath11k_reg_chan_list_event(struct ath11k_base *ab,
struct sk_buff *skb,
enum wmi_reg_chan_list_cmd_type id)
{
struct cur_regulatory_info *reg_info = NULL;
struct ieee80211_regdomain *regd = NULL;
bool intersect = false;
int ret = 0, pdev_idx, i, j;
struct ath11k *ar;
reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
if (!reg_info) {
ret = -ENOMEM;
goto fallback;
}
if (id == WMI_REG_CHAN_LIST_CC_ID)
ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info);
else
ret = ath11k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
if (ret) {
ath11k_warn(ab, "failed to extract regulatory info from received event\n");
goto fallback;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "event reg chan list id %d", id);
if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
/* In case of failure to set the requested country,
* FW retains the current regd. Log the failure and
* return from here.
*/
ath11k_warn(ab, "Failed to set the requested Country regulatory setting\n");
goto mem_free;
}
pdev_idx = reg_info->phy_id;
/* Avoid default reg rule updates sent during FW recovery if
* a default regd is already available.
*/
spin_lock(&ab->base_lock);
if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) &&
ab->default_regd[pdev_idx]) {
spin_unlock(&ab->base_lock);
goto mem_free;
}
spin_unlock(&ab->base_lock);
if (pdev_idx >= ab->num_radios) {
/* Process the event for phy0 only if single_pdev_only
* is true. If pdev_idx is valid but not 0, discard the
* event. Otherwise, it goes to fallback.
*/
if (ab->hw_params.single_pdev_only &&
pdev_idx < ab->hw_params.num_rxmda_per_pdev)
goto mem_free;
else
goto fallback;
}
/* Avoid multiple overwrites to default regd, during core
* stop-start after mac registration.
*/
if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
!memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
(char *)reg_info->alpha2, 2))
goto mem_free;
/* Intersect new rules with default regd if a new country setting was
* requested, i.e a default regd was already set during initialization
* and the regd coming from this event has a valid country info.
*/
if (ab->default_regd[pdev_idx] &&
!ath11k_reg_is_world_alpha((char *)
ab->default_regd[pdev_idx]->alpha2) &&
!ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
intersect = true;
regd = ath11k_reg_build_regd(ab, reg_info, intersect);
if (!regd) {
ath11k_warn(ab, "failed to build regd from reg_info\n");
goto fallback;
}
spin_lock(&ab->base_lock);
if (ab->default_regd[pdev_idx]) {
/* The initial rules from FW after WMI Init are used to build
* the default regd. From then on, any rules updated for
* the pdev could be due to user reg changes.
* Free the previously built regd before assigning the newly
* generated regd to ar. A NULL pointer is handled by
* kfree() itself.
*/
ar = ab->pdevs[pdev_idx].ar;
kfree(ab->new_regd[pdev_idx]);
ab->new_regd[pdev_idx] = regd;
queue_work(ab->workqueue, &ar->regd_update_work);
} else {
/* This regd would be applied during mac registration and is
* held constant throughout for regd intersection purpose
*/
ab->default_regd[pdev_idx] = regd;
}
ab->dfs_region = reg_info->dfs_region;
spin_unlock(&ab->base_lock);
goto mem_free;
fallback:
/* Fall back to the older regd (by sending the previous country setting
* again) if FW has succeeded and we failed to process the event here.
* The regdomain should be uniform across driver and FW. Since the
* FW has processed the command and sent a success status, we expect
* this function to succeed as well. If it doesn't, the country needs to
* be reverted at the FW and the old SCAN_CHAN_LIST cmd needs to be sent.
*/
/* TODO: This is rare, but still should also be handled */
WARN_ON(1);
mem_free:
if (reg_info) {
kfree(reg_info->reg_rules_2ghz_ptr);
kfree(reg_info->reg_rules_5ghz_ptr);
if (reg_info->is_ext_reg_event) {
for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
kfree(reg_info->reg_rules_6ghz_ap_ptr[i]);
for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
kfree(reg_info->reg_rules_6ghz_client_ptr[j][i]);
}
kfree(reg_info);
}
return ret;
}
static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_rdy_parse *rdy_parse = data;
struct wmi_ready_event fixed_param;
struct wmi_mac_addr *addr_list;
struct ath11k_pdev *pdev;
u32 num_mac_addr;
int i;
switch (tag) {
case WMI_TAG_READY_EVENT:
memset(&fixed_param, 0, sizeof(fixed_param));
memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
min_t(u16, sizeof(fixed_param), len));
ab->wlan_init_status = fixed_param.ready_event_min.status;
rdy_parse->num_extra_mac_addr =
fixed_param.ready_event_min.num_extra_mac_addr;
ether_addr_copy(ab->mac_addr,
fixed_param.ready_event_min.mac_addr.addr);
ab->pktlog_defs_checksum = fixed_param.pktlog_defs_checksum;
ab->wmi_ready = true;
break;
case WMI_TAG_ARRAY_FIXED_STRUCT:
addr_list = (struct wmi_mac_addr *)ptr;
num_mac_addr = rdy_parse->num_extra_mac_addr;
if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
break;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
}
ab->pdevs_macaddr_valid = true;
break;
default:
break;
}
return 0;
}
static int ath11k_ready_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_tlv_rdy_parse rdy_parse = { };
int ret;
ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
ath11k_wmi_tlv_rdy_parse, &rdy_parse);
if (ret) {
ath11k_warn(ab, "failed to parse tlv %d\n", ret);
return ret;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "event ready");
complete(&ab->wmi_ab.unified_ready);
return 0;
}
static void ath11k_peer_delete_resp_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_peer_delete_resp_event peer_del_resp;
struct ath11k *ar;
if (ath11k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
ath11k_warn(ab, "failed to extract peer delete resp");
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "event peer delete resp");
rcu_read_lock();
ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_del_resp.vdev_id);
if (!ar) {
ath11k_warn(ab, "invalid vdev id in peer delete resp ev %d",
peer_del_resp.vdev_id);
rcu_read_unlock();
return;
}
complete(&ar->peer_delete_done);
rcu_read_unlock();
ath11k_dbg(ab, ATH11K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
}
static void ath11k_vdev_delete_resp_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
struct ath11k *ar;
u32 vdev_id = 0;
if (ath11k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
ath11k_warn(ab, "failed to extract vdev delete resp");
return;
}
rcu_read_lock();
ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
if (!ar) {
ath11k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
vdev_id);
rcu_read_unlock();
return;
}
complete(&ar->vdev_delete_done);
rcu_read_unlock();
ath11k_dbg(ab, ATH11K_DBG_WMI, "event vdev delete resp for vdev id %d\n",
vdev_id);
}
static inline const char *ath11k_wmi_vdev_resp_print(u32 vdev_resp_status)
{
switch (vdev_resp_status) {
case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
return "invalid vdev id";
case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
return "not supported";
case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
return "dfs violation";
case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
return "invalid regdomain";
default:
return "unknown";
}
}
static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_vdev_start_resp_event vdev_start_resp;
struct ath11k *ar;
u32 status;
if (ath11k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
ath11k_warn(ab, "failed to extract vdev start resp");
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "event vdev start resp");
rcu_read_lock();
ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_start_resp.vdev_id);
if (!ar) {
ath11k_warn(ab, "invalid vdev id in vdev start resp ev %d",
vdev_start_resp.vdev_id);
rcu_read_unlock();
return;
}
ar->last_wmi_vdev_start_status = 0;
status = vdev_start_resp.status;
if (WARN_ON_ONCE(status)) {
ath11k_warn(ab, "vdev start resp error status %d (%s)\n",
status, ath11k_wmi_vdev_resp_print(status));
ar->last_wmi_vdev_start_status = status;
}
complete(&ar->vdev_setup_done);
rcu_read_unlock();
ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev start resp for vdev id %d",
vdev_start_resp.vdev_id);
}
static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct ath11k_vif *arvif;
u32 vdev_id, tx_status;
if (ath11k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
&vdev_id, &tx_status) != 0) {
ath11k_warn(ab, "failed to extract bcn tx status");
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "event offload bcn tx status");
rcu_read_lock();
arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_id);
if (!arvif) {
ath11k_warn(ab, "invalid vdev id %d in bcn_tx_status",
vdev_id);
rcu_read_unlock();
return;
}
ath11k_mac_bcn_tx_event(arvif);
rcu_read_unlock();
}
static void ath11k_wmi_event_peer_sta_ps_state_chg(struct ath11k_base *ab,
struct sk_buff *skb)
{
const struct wmi_peer_sta_ps_state_chg_event *ev;
struct ieee80211_sta *sta;
struct ath11k_peer *peer;
struct ath11k *ar;
struct ath11k_sta *arsta;
const void **tb;
enum ath11k_wmi_peer_ps_state peer_previous_ps_state;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return;
}
ev = tb[WMI_TAG_PEER_STA_PS_STATECHANGE_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch sta ps change ev");
kfree(tb);
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI,
"event peer sta ps change ev addr %pM state %u sup_bitmap %x ps_valid %u ts %u\n",
ev->peer_macaddr.addr, ev->peer_ps_state,
ev->ps_supported_bitmap, ev->peer_ps_valid,
ev->peer_ps_timestamp);
rcu_read_lock();
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_addr(ab, ev->peer_macaddr.addr);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
ath11k_warn(ab, "peer not found %pM\n", ev->peer_macaddr.addr);
goto exit;
}
ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
if (!ar) {
spin_unlock_bh(&ab->base_lock);
ath11k_warn(ab, "invalid vdev id in peer sta ps state change ev %d",
peer->vdev_id);
goto exit;
}
sta = peer->sta;
spin_unlock_bh(&ab->base_lock);
if (!sta) {
ath11k_warn(ab, "failed to find station entry %pM\n",
ev->peer_macaddr.addr);
goto exit;
}
arsta = (struct ath11k_sta *)sta->drv_priv;
spin_lock_bh(&ar->data_lock);
peer_previous_ps_state = arsta->peer_ps_state;
arsta->peer_ps_state = ev->peer_ps_state;
arsta->peer_current_ps_valid = !!ev->peer_ps_valid;
if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT,
ar->ab->wmi_ab.svc_map)) {
if (!(ev->ps_supported_bitmap & WMI_PEER_PS_VALID) ||
!(ev->ps_supported_bitmap & WMI_PEER_PS_STATE_TIMESTAMP) ||
!ev->peer_ps_valid)
goto out;
if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON) {
arsta->ps_start_time = ev->peer_ps_timestamp;
arsta->ps_start_jiffies = jiffies;
} else if (arsta->peer_ps_state == WMI_PEER_PS_STATE_OFF &&
peer_previous_ps_state == WMI_PEER_PS_STATE_ON) {
arsta->ps_total_duration = arsta->ps_total_duration +
(ev->peer_ps_timestamp - arsta->ps_start_time);
}
if (ar->ps_timekeeper_enable)
trace_ath11k_ps_timekeeper(ar, ev->peer_macaddr.addr,
ev->peer_ps_timestamp,
arsta->peer_ps_state);
}
out:
spin_unlock_bh(&ar->data_lock);
exit:
rcu_read_unlock();
kfree(tb);
}
static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct ath11k *ar;
u32 vdev_id = 0;
if (ath11k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
ath11k_warn(ab, "failed to extract vdev stopped event");
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "event vdev stopped");
rcu_read_lock();
ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
if (!ar) {
ath11k_warn(ab, "invalid vdev id in vdev stopped ev %d",
vdev_id);
rcu_read_unlock();
return;
}
complete(&ar->vdev_setup_done);
rcu_read_unlock();
ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
}
static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct mgmt_rx_event_params rx_ev = {0};
struct ath11k *ar;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr;
u16 fc;
struct ieee80211_supported_band *sband;
if (ath11k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
ath11k_warn(ab, "failed to extract mgmt rx event");
dev_kfree_skb(skb);
return;
}
memset(status, 0, sizeof(*status));
ath11k_dbg(ab, ATH11K_DBG_MGMT, "event mgmt rx status %08x\n",
rx_ev.status);
rcu_read_lock();
ar = ath11k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
if (!ar) {
ath11k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
rx_ev.pdev_id);
dev_kfree_skb(skb);
goto exit;
}
if ((test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) ||
(rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
dev_kfree_skb(skb);
goto exit;
}
if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
if (rx_ev.chan_freq >= ATH11K_MIN_6G_FREQ &&
rx_ev.chan_freq <= ATH11K_MAX_6G_FREQ) {
status->band = NL80211_BAND_6GHZ;
status->freq = rx_ev.chan_freq;
} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
status->band = NL80211_BAND_2GHZ;
} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH11K_MAX_5G_CHAN) {
status->band = NL80211_BAND_5GHZ;
} else {
/* Shouldn't happen unless list of advertised channels to
* mac80211 has been changed.
*/
WARN_ON_ONCE(1);
dev_kfree_skb(skb);
goto exit;
}
if (rx_ev.phy_mode == MODE_11B &&
(status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
ath11k_dbg(ab, ATH11K_DBG_WMI,
"mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);
sband = &ar->mac.sbands[status->band];
if (status->band != NL80211_BAND_6GHZ)
status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
status->band);
status->signal = rx_ev.snr + ATH11K_DEFAULT_NOISE_FLOOR;
status->rate_idx = ath11k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
hdr = (struct ieee80211_hdr *)skb->data;
fc = le16_to_cpu(hdr->frame_control);
/* Firmware is guaranteed to report all essential management frames via
* WMI while it can deliver some extra via HTT. Since there can be
* duplicates, split the reporting with respect to monitor/sniffing.
*/
status->flag |= RX_FLAG_SKIP_MONITOR;
/* In case of PMF, FW delivers decrypted frames with Protected Bit set.
* Don't clear that. Also, FW delivers broadcast management frames
* (ex: group privacy action frames in mesh) as encrypted payload.
*/
if (ieee80211_has_protected(hdr->frame_control) &&
!is_multicast_ether_addr(ieee80211_get_DA(hdr))) {
status->flag |= RX_FLAG_DECRYPTED;
if (!ieee80211_is_robust_mgmt_frame(skb)) {
status->flag |= RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED;
hdr->frame_control = __cpu_to_le16(fc &
~IEEE80211_FCTL_PROTECTED);
}
}
if (ieee80211_is_beacon(hdr->frame_control))
ath11k_mac_handle_beacon(ar, skb);
ath11k_dbg(ab, ATH11K_DBG_MGMT,
"event mgmt rx skb %p len %d ftype %02x stype %02x\n",
skb, skb->len,
fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
ath11k_dbg(ab, ATH11K_DBG_MGMT,
"event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
status->freq, status->band, status->signal,
status->rate_idx);
ieee80211_rx_ni(ar->hw, skb);
exit:
rcu_read_unlock();
}
static void ath11k_mgmt_tx_compl_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
struct ath11k *ar;
if (ath11k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
ath11k_warn(ab, "failed to extract mgmt tx compl event");
return;
}
rcu_read_lock();
ar = ath11k_mac_get_ar_by_pdev_id(ab, tx_compl_param.pdev_id);
if (!ar) {
ath11k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
tx_compl_param.pdev_id);
goto exit;
}
wmi_process_mgmt_tx_comp(ar, &tx_compl_param);
ath11k_dbg(ab, ATH11K_DBG_MGMT,
"event mgmt tx compl ev pdev_id %d, desc_id %d, status %d ack_rssi %d",
tx_compl_param.pdev_id, tx_compl_param.desc_id,
tx_compl_param.status, tx_compl_param.ack_rssi);
exit:
rcu_read_unlock();
}
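/* Walk the active pdevs under RCU and return the ar whose current scan state
* and scan vdev id match the given ones; used when the scan's vdev is no
* longer in the active interface list.
*/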
static struct ath11k *ath11k_get_ar_on_scan_state(struct ath11k_base *ab,
u32 vdev_id,
enum ath11k_scan_state state)
{
int i;
struct ath11k_pdev *pdev;
struct ath11k *ar;
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
if (pdev && pdev->ar) {
ar = pdev->ar;
spin_lock_bh(&ar->data_lock);
if (ar->scan.state == state &&
ar->scan.vdev_id == vdev_id) {
spin_unlock_bh(&ar->data_lock);
return ar;
}
spin_unlock_bh(&ar->data_lock);
}
}
return NULL;
}
static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct ath11k *ar;
struct wmi_scan_event scan_ev = {0};
if (ath11k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
ath11k_warn(ab, "failed to extract scan event");
return;
}
rcu_read_lock();
/* In case the scan was cancelled, e.g. during interface teardown,
* the interface will not be found among the active interfaces.
* In such scenarios, iterate over the active pdevs to find the
* 'ar' whose scan is ABORTING and whose aborting scan's vdev id
* matches this event's info.
*/
if (scan_ev.event_type == WMI_SCAN_EVENT_COMPLETED &&
scan_ev.reason == WMI_SCAN_REASON_CANCELLED) {
ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id,
ATH11K_SCAN_ABORTING);
if (!ar)
ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id,
ATH11K_SCAN_RUNNING);
} else {
ar = ath11k_mac_get_ar_by_vdev_id(ab, scan_ev.vdev_id);
}
if (!ar) {
ath11k_warn(ab, "Received scan event for unknown vdev");
rcu_read_unlock();
return;
}
spin_lock_bh(&ar->data_lock);
ath11k_dbg(ab, ATH11K_DBG_WMI,
"event scan %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
ath11k_wmi_event_scan_type_str(scan_ev.event_type, scan_ev.reason),
scan_ev.event_type, scan_ev.reason, scan_ev.channel_freq,
scan_ev.scan_req_id, scan_ev.scan_id, scan_ev.vdev_id,
ath11k_scan_state_str(ar->scan.state), ar->scan.state);
switch (scan_ev.event_type) {
case WMI_SCAN_EVENT_STARTED:
ath11k_wmi_event_scan_started(ar);
break;
case WMI_SCAN_EVENT_COMPLETED:
ath11k_wmi_event_scan_completed(ar);
break;
case WMI_SCAN_EVENT_BSS_CHANNEL:
ath11k_wmi_event_scan_bss_chan(ar);
break;
case WMI_SCAN_EVENT_FOREIGN_CHAN:
ath11k_wmi_event_scan_foreign_chan(ar, scan_ev.channel_freq);
break;
case WMI_SCAN_EVENT_START_FAILED:
ath11k_warn(ab, "received scan start failure event\n");
ath11k_wmi_event_scan_start_failed(ar);
break;
case WMI_SCAN_EVENT_DEQUEUED:
__ath11k_mac_scan_finish(ar);
break;
case WMI_SCAN_EVENT_PREEMPTED:
case WMI_SCAN_EVENT_RESTARTED:
case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
default:
break;
}
spin_unlock_bh(&ar->data_lock);
rcu_read_unlock();
}
static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_peer_sta_kickout_arg arg = {};
struct ieee80211_sta *sta;
struct ath11k_peer *peer;
struct ath11k *ar;
u32 vdev_id;
if (ath11k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
ath11k_warn(ab, "failed to extract peer sta kickout event");
return;
}
rcu_read_lock();
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_addr(ab, arg.mac_addr);
if (!peer) {
ath11k_warn(ab, "peer not found %pM\n",
arg.mac_addr);
spin_unlock_bh(&ab->base_lock);
goto exit;
}
vdev_id = peer->vdev_id;
spin_unlock_bh(&ab->base_lock);
ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
if (!ar) {
ath11k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
peer->vdev_id);
goto exit;
}
sta = ieee80211_find_sta_by_ifaddr(ar->hw,
arg.mac_addr, NULL);
if (!sta) {
ath11k_warn(ab, "Spurious quick kickout for STA %pM\n",
arg.mac_addr);
goto exit;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "event peer sta kickout %pM",
arg.mac_addr);
ieee80211_report_low_ack(sta, 10);
exit:
rcu_read_unlock();
}
static void ath11k_roam_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_roam_event roam_ev = {};
struct ath11k *ar;
if (ath11k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
ath11k_warn(ab, "failed to extract roam event");
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI,
"event roam vdev %u reason 0x%08x rssi %d\n",
roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);
rcu_read_lock();
ar = ath11k_mac_get_ar_by_vdev_id(ab, roam_ev.vdev_id);
if (!ar) {
ath11k_warn(ab, "invalid vdev id in roam ev %d",
roam_ev.vdev_id);
rcu_read_unlock();
return;
}
if (roam_ev.reason >= WMI_ROAM_REASON_MAX)
ath11k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
roam_ev.reason, roam_ev.vdev_id);
switch (roam_ev.reason) {
case WMI_ROAM_REASON_BEACON_MISS:
ath11k_mac_handle_beacon_miss(ar, roam_ev.vdev_id);
break;
case WMI_ROAM_REASON_BETTER_AP:
case WMI_ROAM_REASON_LOW_RSSI:
case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
case WMI_ROAM_REASON_HO_FAILED:
ath11k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
roam_ev.reason, roam_ev.vdev_id);
break;
}
rcu_read_unlock();
}
static void ath11k_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_chan_info_event ch_info_ev = {0};
struct ath11k *ar;
struct survey_info *survey;
int idx;
/* HW channel counters frequency value in hertz */
u32 cc_freq_hz = ab->cc_freq_hz;
if (ath11k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
ath11k_warn(ab, "failed to extract chan info event");
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI,
"event chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
ch_info_ev.mac_clk_mhz);
if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_END_RESP) {
ath11k_dbg(ab, ATH11K_DBG_WMI, "chan info report completed\n");
return;
}
rcu_read_lock();
ar = ath11k_mac_get_ar_by_vdev_id(ab, ch_info_ev.vdev_id);
if (!ar) {
ath11k_warn(ab, "invalid vdev id in chan info ev %d",
ch_info_ev.vdev_id);
rcu_read_unlock();
return;
}
spin_lock_bh(&ar->data_lock);
switch (ar->scan.state) {
case ATH11K_SCAN_IDLE:
case ATH11K_SCAN_STARTING:
ath11k_warn(ab, "received chan info event without a scan request, ignoring\n");
goto exit;
case ATH11K_SCAN_RUNNING:
case ATH11K_SCAN_ABORTING:
break;
}
idx = freq_to_idx(ar, ch_info_ev.freq);
if (idx >= ARRAY_SIZE(ar->survey)) {
ath11k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
ch_info_ev.freq, idx);
goto exit;
}
/* If FW provides the MAC clock frequency in MHz, override the
* initialized HW channel counters frequency value.
*/
if (ch_info_ev.mac_clk_mhz)
cc_freq_hz = (ch_info_ev.mac_clk_mhz * 1000);
if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
survey = &ar->survey[idx];
memset(survey, 0, sizeof(*survey));
survey->noise = ch_info_ev.noise_floor;
survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
SURVEY_INFO_TIME_BUSY;
survey->time = div_u64(ch_info_ev.cycle_count, cc_freq_hz);
survey->time_busy = div_u64(ch_info_ev.rx_clear_count, cc_freq_hz);
}
exit:
spin_unlock_bh(&ar->data_lock);
rcu_read_unlock();
}
static void
ath11k_pdev_bss_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
struct survey_info *survey;
struct ath11k *ar;
u32 cc_freq_hz = ab->cc_freq_hz;
u64 busy, total, tx, rx, rx_bss;
int idx;
if (ath11k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
ath11k_warn(ab, "failed to extract pdev bss chan info event");
return;
}
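/* FW reports each 64-bit cycle counter split into two 32-bit words;
* reassemble them before deriving the survey times.
*/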
busy = (u64)(bss_ch_info_ev.rx_clear_count_high) << 32 |
bss_ch_info_ev.rx_clear_count_low;
total = (u64)(bss_ch_info_ev.cycle_count_high) << 32 |
bss_ch_info_ev.cycle_count_low;
tx = (u64)(bss_ch_info_ev.tx_cycle_count_high) << 32 |
bss_ch_info_ev.tx_cycle_count_low;
rx = (u64)(bss_ch_info_ev.rx_cycle_count_high) << 32 |
bss_ch_info_ev.rx_cycle_count_low;
rx_bss = (u64)(bss_ch_info_ev.rx_bss_cycle_count_high) << 32 |
bss_ch_info_ev.rx_bss_cycle_count_low;
ath11k_dbg(ab, ATH11K_DBG_WMI,
"event pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
bss_ch_info_ev.noise_floor, busy, total,
tx, rx, rx_bss);
rcu_read_lock();
ar = ath11k_mac_get_ar_by_pdev_id(ab, bss_ch_info_ev.pdev_id);
if (!ar) {
ath11k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
bss_ch_info_ev.pdev_id);
rcu_read_unlock();
return;
}
spin_lock_bh(&ar->data_lock);
idx = freq_to_idx(ar, bss_ch_info_ev.freq);
if (idx >= ARRAY_SIZE(ar->survey)) {
ath11k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
bss_ch_info_ev.freq, idx);
goto exit;
}
survey = &ar->survey[idx];
survey->noise = bss_ch_info_ev.noise_floor;
survey->time = div_u64(total, cc_freq_hz);
survey->time_busy = div_u64(busy, cc_freq_hz);
survey->time_rx = div_u64(rx_bss, cc_freq_hz);
survey->time_tx = div_u64(tx, cc_freq_hz);
survey->filled |= (SURVEY_INFO_NOISE_DBM |
SURVEY_INFO_TIME |
SURVEY_INFO_TIME_BUSY |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_TX);
exit:
spin_unlock_bh(&ar->data_lock);
complete(&ar->bss_survey_done);
rcu_read_unlock();
}
static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
struct ath11k *ar;
if (ath11k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
ath11k_warn(ab, "failed to extract install key compl event");
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI,
"event vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
install_key_compl.key_idx, install_key_compl.key_flags,
install_key_compl.macaddr, install_key_compl.status);
rcu_read_lock();
ar = ath11k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
if (!ar) {
ath11k_warn(ab, "invalid vdev id in install key compl ev %d",
install_key_compl.vdev_id);
rcu_read_unlock();
return;
}
ar->install_key_status = 0;
if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
ath11k_warn(ab, "install key failed for %pM status %d\n",
install_key_compl.macaddr, install_key_compl.status);
ar->install_key_status = install_key_compl.status;
}
complete(&ar->install_key_done);
rcu_read_unlock();
}
static int ath11k_wmi_tlv_services_parser(struct ath11k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
{
const struct wmi_service_available_event *ev;
u32 *wmi_ext2_service_bitmap;
int i, j;
switch (tag) {
case WMI_TAG_SERVICE_AVAILABLE_EVENT:
ev = (struct wmi_service_available_event *)ptr;
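/* Each 32-bit word of the segment bitmap carries 32 service flags; j
* tracks the absolute service id while the inner loop walks the bits
* of the current word.
*/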
for (i = 0, j = WMI_MAX_SERVICE;
i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
i++) {
do {
if (ev->wmi_service_segment_bitmap[i] &
BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
set_bit(j, ab->wmi_ab.svc_map);
} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
}
ath11k_dbg(ab, ATH11K_DBG_WMI,
"wmi_ext_service_bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x",
ev->wmi_service_segment_bitmap[0],
ev->wmi_service_segment_bitmap[1],
ev->wmi_service_segment_bitmap[2],
ev->wmi_service_segment_bitmap[3]);
break;
case WMI_TAG_ARRAY_UINT32:
wmi_ext2_service_bitmap = (u32 *)ptr;
for (i = 0, j = WMI_MAX_EXT_SERVICE;
i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
i++) {
do {
if (wmi_ext2_service_bitmap[i] &
BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
set_bit(j, ab->wmi_ab.svc_map);
} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
}
ath11k_dbg(ab, ATH11K_DBG_WMI,
"wmi_ext2_service__bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x",
wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
break;
}
return 0;
}
static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb)
{
int ret;
ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
ath11k_wmi_tlv_services_parser,
NULL);
if (ret)
ath11k_warn(ab, "failed to parse services available tlv %d\n", ret);
ath11k_dbg(ab, ATH11K_DBG_WMI, "event service available");
}
static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
struct ath11k *ar;
if (ath11k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
ath11k_warn(ab, "failed to extract peer assoc conf event");
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI,
"event peer assoc conf ev vdev id %d macaddr %pM\n",
peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
rcu_read_lock();
ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
if (!ar) {
ath11k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
peer_assoc_conf.vdev_id);
rcu_read_unlock();
return;
}
complete(&ar->peer_assoc_done);
rcu_read_unlock();
}
static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct ath11k_fw_stats stats = {};
struct ath11k *ar;
int ret;
INIT_LIST_HEAD(&stats.pdevs);
INIT_LIST_HEAD(&stats.vdevs);
INIT_LIST_HEAD(&stats.bcn);
ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats);
if (ret) {
ath11k_warn(ab, "failed to pull fw stats: %d\n", ret);
goto free;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "event update stats");
rcu_read_lock();
ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
if (!ar) {
rcu_read_unlock();
ath11k_warn(ab, "failed to get ar for pdev_id %d: %d\n",
stats.pdev_id, ret);
goto free;
}
spin_lock_bh(&ar->data_lock);
/* WMI_REQUEST_PDEV_STAT can be requested via .get_txpower mac ops or via
* debugfs fw stats. Therefore, processing it separately.
*/
if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
ar->fw_stats_done = true;
goto complete;
}
/* WMI_REQUEST_VDEV_STAT, WMI_REQUEST_BCN_STAT and WMI_REQUEST_RSSI_PER_CHAIN_STAT
* are currently requested only via debugfs fw stats. Hence, processing these
* in debugfs context
*/
ath11k_debugfs_fw_stats_process(ar, &stats);
complete:
complete(&ar->fw_stats_complete);
rcu_read_unlock();
spin_unlock_bh(&ar->data_lock);
/* Since the stats' pdev, vdev and beacon lists are spliced and reinitialised
* at this point, there is no need to free the individual lists.
*/
return;
free:
ath11k_fw_stats_free(&stats);
}
/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the scanned frequency
* is not part of the BDF CTL (Conformance Test Limits) table entries.
*/
static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_pdev_ctl_failsafe_chk_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return;
}
ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
kfree(tb);
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI,
"event pdev ctl failsafe check status %d\n",
ev->ctl_failsafe_status);
/* If ctl_failsafe_status is set to 1, FW will max out the transmit power
* to 10 dBm; otherwise the CTL power entry in the BDF is picked up.
*/
if (ev->ctl_failsafe_status != 0)
ath11k_warn(ab, "pdev ctl failsafe failure status %d",
ev->ctl_failsafe_status);
kfree(tb);
}
static void
ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab,
const struct wmi_pdev_csa_switch_ev *ev,
const u32 *vdev_ids)
{
int i;
struct ath11k_vif *arvif;
/* Finish CSA once the switch count reaches zero */
if (ev->current_switch_count)
return;
rcu_read_lock();
for (i = 0; i < ev->num_vdevs; i++) {
arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
if (!arvif) {
ath11k_warn(ab, "Recvd csa status for unknown vdev %d",
vdev_ids[i]);
continue;
}
if (arvif->is_up && arvif->vif->bss_conf.csa_active)
ieee80211_csa_finish(arvif->vif);
}
rcu_read_unlock();
}
static void
ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_pdev_csa_switch_ev *ev;
const u32 *vdev_ids;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return;
}
ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
if (!ev || !vdev_ids) {
ath11k_warn(ab, "failed to fetch pdev csa switch count ev");
kfree(tb);
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI,
"event pdev csa switch count %d for pdev %d, num_vdevs %d",
ev->current_switch_count, ev->pdev_id,
ev->num_vdevs);
ath11k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
kfree(tb);
}
static void
ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff *skb)
{
const void **tb;
const struct wmi_pdev_radar_ev *ev;
struct ath11k *ar;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return;
}
ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch pdev dfs radar detected ev");
kfree(tb);
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI,
"event pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
ev->freq_offset, ev->sidx);
ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
if (!ar) {
ath11k_warn(ab, "radar detected in invalid pdev %d\n",
ev->pdev_id);
goto exit;
}
ath11k_dbg(ar->ab, ATH11K_DBG_REG, "DFS Radar Detected in pdev %d\n",
ev->pdev_id);
if (ar->dfs_block_radar_events)
ath11k_info(ab, "DFS Radar detected, but ignored as requested\n");
else
ieee80211_radar_detected(ar->hw);
exit:
kfree(tb);
}
static void
ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
struct ath11k *ar;
const void **tb;
const struct wmi_pdev_temperature_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return;
}
ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch pdev temp ev");
kfree(tb);
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev temperature ev temp %d pdev_id %d\n",
ev->temp, ev->pdev_id);
ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
if (!ar) {
ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id);
kfree(tb);
return;
}
ath11k_thermal_event_temperature(ar, ev->temp);
kfree(tb);
}
static void ath11k_fils_discovery_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_fils_discovery_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab,
"failed to parse FILS discovery event tlv %d\n",
ret);
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "event fils discovery");
ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch FILS discovery event\n");
kfree(tb);
return;
}
ath11k_warn(ab,
"FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
ev->vdev_id, ev->fils_tt, ev->tbtt);
kfree(tb);
}
static void ath11k_probe_resp_tx_status_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_probe_resp_tx_status_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab,
"failed to parse probe response transmission status event tlv: %d\n",
ret);
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "event probe resp tx status");
ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
if (!ev) {
ath11k_warn(ab,
"failed to fetch probe response transmission status event");
kfree(tb);
return;
}
if (ev->tx_status)
ath11k_warn(ab,
"Probe response transmission failed for vdev_id %u, status %u\n",
ev->vdev_id, ev->tx_status);
kfree(tb);
}
static int ath11k_wmi_tlv_wow_wakeup_host_parse(struct ath11k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_wow_ev_arg *ev = data;
const char *wow_pg_fault;
int wow_pg_len;
switch (tag) {
case WMI_TAG_WOW_EVENT_INFO:
memcpy(ev, ptr, sizeof(*ev));
ath11k_dbg(ab, ATH11K_DBG_WMI, "wow wakeup host reason %d %s\n",
ev->wake_reason, wow_reason(ev->wake_reason));
break;
case WMI_TAG_ARRAY_BYTE:
if (ev && ev->wake_reason == WOW_REASON_PAGE_FAULT) {
wow_pg_fault = ptr;
/* the first 4 bytes are length */
wow_pg_len = *(int *)wow_pg_fault;
wow_pg_fault += sizeof(int);
ath11k_dbg(ab, ATH11K_DBG_WMI, "wow data_len = %d\n",
wow_pg_len);
ath11k_dbg_dump(ab, ATH11K_DBG_WMI,
"wow_event_info_type packet present",
"wow_pg_fault ",
wow_pg_fault,
wow_pg_len);
}
break;
default:
break;
}
return 0;
}
static void ath11k_wmi_event_wow_wakeup_host(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_wow_ev_arg ev = { };
int ret;
ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
ath11k_wmi_tlv_wow_wakeup_host_parse,
&ev);
if (ret) {
ath11k_warn(ab, "failed to parse wmi wow tlv: %d\n", ret);
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "event wow wakeup host");
complete(&ab->wow.wakeup_completed);
}
static void
ath11k_wmi_diag_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
ath11k_dbg(ab, ATH11K_DBG_WMI, "event diag");
trace_ath11k_wmi_diag(ab, skb->data, skb->len);
}
static const char *ath11k_wmi_twt_add_dialog_event_status(u32 status)
{
switch (status) {
case WMI_ADD_TWT_STATUS_OK:
return "ok";
case WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED:
return "twt disabled";
case WMI_ADD_TWT_STATUS_USED_DIALOG_ID:
return "dialog id in use";
case WMI_ADD_TWT_STATUS_INVALID_PARAM:
return "invalid parameters";
case WMI_ADD_TWT_STATUS_NOT_READY:
return "not ready";
case WMI_ADD_TWT_STATUS_NO_RESOURCE:
return "resource unavailable";
case WMI_ADD_TWT_STATUS_NO_ACK:
return "no ack";
case WMI_ADD_TWT_STATUS_NO_RESPONSE:
return "no response";
case WMI_ADD_TWT_STATUS_DENIED:
return "denied";
case WMI_ADD_TWT_STATUS_UNKNOWN_ERROR:
fallthrough;
default:
return "unknown error";
}
}
static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_twt_add_dialog_event *ev;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab,
"failed to parse wmi twt add dialog status event tlv: %d\n",
ret);
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "event twt add dialog");
ev = tb[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch twt add dialog wmi event\n");
goto exit;
}
if (ev->status)
ath11k_warn(ab,
"wmi add twt dialog event vdev %d dialog id %d status %s\n",
ev->vdev_id, ev->dialog_id,
ath11k_wmi_twt_add_dialog_event_status(ev->status));
exit:
kfree(tb);
}
static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_gtk_offload_status_event *ev;
struct ath11k_vif *arvif;
__be64 replay_ctr_be;
u64 replay_ctr;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return;
}
ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch gtk offload status ev");
kfree(tb);
return;
}
arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
if (!arvif) {
ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n",
ev->vdev_id);
kfree(tb);
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "event gtk offload refresh_cnt %d\n",
ev->refresh_cnt);
ath11k_dbg_dump(ab, ATH11K_DBG_WMI, "replay_cnt",
NULL, ev->replay_ctr.counter, GTK_REPLAY_COUNTER_BYTES);
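/* FW reports the 64-bit replay counter split into two 32-bit words,
* with word1 holding the upper half.
*/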
replay_ctr = ev->replay_ctr.word1;
replay_ctr = (replay_ctr << 32) | ev->replay_ctr.word0;
arvif->rekey_data.replay_ctr = replay_ctr;
/* supplicant expects big-endian replay counter */
replay_ctr_be = cpu_to_be64(replay_ctr);
ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
(void *)&replay_ctr_be, GFP_ATOMIC);
kfree(tb);
}
static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_tlv_event_id id;
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id));
trace_ath11k_wmi_event(ab, id, skb->data, skb->len);
if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
goto out;
switch (id) {
/* Process all the WMI events here */
case WMI_SERVICE_READY_EVENTID:
ath11k_service_ready_event(ab, skb);
break;
case WMI_SERVICE_READY_EXT_EVENTID:
ath11k_service_ready_ext_event(ab, skb);
break;
case WMI_SERVICE_READY_EXT2_EVENTID:
ath11k_service_ready_ext2_event(ab, skb);
break;
case WMI_REG_CHAN_LIST_CC_EVENTID:
ath11k_reg_chan_list_event(ab, skb, WMI_REG_CHAN_LIST_CC_ID);
break;
case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
ath11k_reg_chan_list_event(ab, skb, WMI_REG_CHAN_LIST_CC_EXT_ID);
break;
case WMI_READY_EVENTID:
ath11k_ready_event(ab, skb);
break;
case WMI_PEER_DELETE_RESP_EVENTID:
ath11k_peer_delete_resp_event(ab, skb);
break;
case WMI_VDEV_START_RESP_EVENTID:
ath11k_vdev_start_resp_event(ab, skb);
break;
case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
ath11k_bcn_tx_status_event(ab, skb);
break;
case WMI_VDEV_STOPPED_EVENTID:
ath11k_vdev_stopped_event(ab, skb);
break;
case WMI_MGMT_RX_EVENTID:
ath11k_mgmt_rx_event(ab, skb);
/* mgmt_rx_event() owns the skb now! */
return;
case WMI_MGMT_TX_COMPLETION_EVENTID:
ath11k_mgmt_tx_compl_event(ab, skb);
break;
case WMI_SCAN_EVENTID:
ath11k_scan_event(ab, skb);
break;
case WMI_PEER_STA_KICKOUT_EVENTID:
ath11k_peer_sta_kickout_event(ab, skb);
break;
case WMI_ROAM_EVENTID:
ath11k_roam_event(ab, skb);
break;
case WMI_CHAN_INFO_EVENTID:
ath11k_chan_info_event(ab, skb);
break;
case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
ath11k_pdev_bss_chan_info_event(ab, skb);
break;
case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
ath11k_vdev_install_key_compl_event(ab, skb);
break;
case WMI_SERVICE_AVAILABLE_EVENTID:
ath11k_service_available_event(ab, skb);
break;
case WMI_PEER_ASSOC_CONF_EVENTID:
ath11k_peer_assoc_conf_event(ab, skb);
break;
case WMI_UPDATE_STATS_EVENTID:
ath11k_update_stats_event(ab, skb);
break;
case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
ath11k_pdev_ctl_failsafe_check_event(ab, skb);
break;
case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
ath11k_wmi_pdev_csa_switch_count_status_event(ab, skb);
break;
case WMI_PDEV_UTF_EVENTID:
ath11k_tm_wmi_event(ab, id, skb);
break;
case WMI_PDEV_TEMPERATURE_EVENTID:
ath11k_wmi_pdev_temperature_event(ab, skb);
break;
case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
ath11k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
break;
case WMI_HOST_FILS_DISCOVERY_EVENTID:
ath11k_fils_discovery_event(ab, skb);
break;
case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
ath11k_probe_resp_tx_status_event(ab, skb);
break;
case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
ath11k_wmi_obss_color_collision_event(ab, skb);
break;
case WMI_TWT_ADD_DIALOG_EVENTID:
ath11k_wmi_twt_add_dialog_event(ab, skb);
break;
case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
ath11k_wmi_pdev_dfs_radar_detected_event(ab, skb);
break;
case WMI_VDEV_DELETE_RESP_EVENTID:
ath11k_vdev_delete_resp_event(ab, skb);
break;
case WMI_WOW_WAKEUP_HOST_EVENTID:
ath11k_wmi_event_wow_wakeup_host(ab, skb);
break;
case WMI_11D_NEW_COUNTRY_EVENTID:
ath11k_reg_11d_new_cc_event(ab, skb);
break;
case WMI_DIAG_EVENTID:
ath11k_wmi_diag_event(ab, skb);
break;
case WMI_PEER_STA_PS_STATECHG_EVENTID:
ath11k_wmi_event_peer_sta_ps_state_chg(ab, skb);
break;
case WMI_GTK_OFFLOAD_STATUS_EVENTID:
ath11k_wmi_gtk_offload_status_event(ab, skb);
break;
default:
ath11k_dbg(ab, ATH11K_DBG_WMI, "unsupported event id 0x%x\n", id);
break;
}
out:
dev_kfree_skb(skb);
}
static int ath11k_connect_pdev_htc_service(struct ath11k_base *ab,
u32 pdev_idx)
{
int status;
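/* One WMI control service per radio: index 0 is the primary control
* service, MAC1/MAC2 serve the additional pdevs.
*/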
u32 svc_id[] = { ATH11K_HTC_SVC_ID_WMI_CONTROL,
ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
struct ath11k_htc_svc_conn_req conn_req;
struct ath11k_htc_svc_conn_resp conn_resp;
memset(&conn_req, 0, sizeof(conn_req));
memset(&conn_resp, 0, sizeof(conn_resp));
/* these fields are the same for all service endpoints */
conn_req.ep_ops.ep_tx_complete = ath11k_wmi_htc_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath11k_wmi_tlv_op_rx;
conn_req.ep_ops.ep_tx_credits = ath11k_wmi_op_ep_tx_credits;
/* connect to control service */
conn_req.service_id = svc_id[pdev_idx];
status = ath11k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
if (status) {
ath11k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
status);
return status;
}
ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
init_waitqueue_head(&ab->wmi_ab.wmi[pdev_idx].tx_ce_desc_wq);
return 0;
}
static int
ath11k_wmi_send_unit_test_cmd(struct ath11k *ar,
struct wmi_unit_test_cmd ut_cmd,
u32 *test_args)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_unit_test_cmd *cmd;
struct sk_buff *skb;
struct wmi_tlv *tlv;
void *ptr;
u32 *ut_cmd_args;
int buf_len, arg_len;
int ret;
int i;
arg_len = sizeof(u32) * ut_cmd.num_args;
buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
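/* Buffer layout: fixed unit test cmd TLV followed by a uint32 array
* TLV header carrying the test arguments.
*/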
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_unit_test_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_UNIT_TEST_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(ut_cmd) - TLV_HDR_SIZE);
cmd->vdev_id = ut_cmd.vdev_id;
cmd->module_id = ut_cmd.module_id;
cmd->num_args = ut_cmd.num_args;
cmd->diag_token = ut_cmd.diag_token;
ptr = skb->data + sizeof(ut_cmd);
tlv = ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
FIELD_PREP(WMI_TLV_LEN, arg_len);
ptr += TLV_HDR_SIZE;
ut_cmd_args = ptr;
for (i = 0; i < ut_cmd.num_args; i++)
ut_cmd_args[i] = test_args[i];
/* Log before sending: on the error path the skb (and cmd within it) is
* freed and must not be dereferenced afterwards.
*/
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd unit test module %d vdev %d n_args %d token %d\n",
cmd->module_id, cmd->vdev_id, cmd->num_args,
cmd->diag_token);
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
ret);
dev_kfree_skb(skb);
}
return ret;
}
int ath11k_wmi_simulate_radar(struct ath11k *ar)
{
struct ath11k_vif *arvif;
u32 dfs_args[DFS_MAX_TEST_ARGS];
struct wmi_unit_test_cmd wmi_ut;
bool arvif_found = false;
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
arvif_found = true;
break;
}
}
if (!arvif_found)
return -EINVAL;
dfs_args[DFS_TEST_CMDID] = 0;
dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
/* Currently we could pass segment_id (b0 - b1), chirp (b2) and
* freq offset (b3 - b10) to the unit test. For simulation
* purposes this can be set to 0, which is valid.
*/
dfs_args[DFS_TEST_RADAR_PARAM] = 0;
wmi_ut.vdev_id = arvif->vdev_id;
wmi_ut.module_id = DFS_UNIT_TEST_MODULE;
wmi_ut.num_args = DFS_MAX_TEST_ARGS;
wmi_ut.diag_token = DFS_UNIT_TEST_TOKEN;
ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Triggering Radar Simulation\n");
return ath11k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
}
int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap,
struct ath11k_fw_dbglog *dbglog)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_debug_log_config_cmd_fixed_param *cmd;
struct sk_buff *skb;
struct wmi_tlv *tlv;
int ret, len;
len = sizeof(*cmd) + TLV_HDR_SIZE + (MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
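/* Buffer layout: fixed debug log config TLV followed by a uint32 array
* TLV carrying the module id bitmap.
*/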
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_debug_log_config_cmd_fixed_param *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DEBUG_LOG_CONFIG_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->dbg_log_param = dbglog->param;
tlv = (struct wmi_tlv *)((u8 *)cmd + sizeof(*cmd));
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
FIELD_PREP(WMI_TLV_LEN, MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
switch (dbglog->param) {
case WMI_DEBUG_LOG_PARAM_LOG_LEVEL:
case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE:
case WMI_DEBUG_LOG_PARAM_VDEV_DISABLE:
case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP:
cmd->value = dbglog->value;
break;
case WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP:
case WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP:
cmd->value = dbglog->value;
memcpy(tlv->value, module_id_bitmap,
MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
/* clear current config to be used for next user config */
memset(module_id_bitmap, 0,
MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
break;
default:
dev_kfree_skb(skb);
return -EINVAL;
}
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DBGLOG_CFG_CMDID);
if (ret) {
ath11k_warn(ar->ab,
"failed to send WMI_DBGLOG_CFG_CMDID\n");
dev_kfree_skb(skb);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd dbglog cfg");
return ret;
}
int ath11k_wmi_connect(struct ath11k_base *ab)
{
u32 i;
u8 wmi_ep_count;
wmi_ep_count = ab->htc.wmi_ep_count;
if (wmi_ep_count > ab->hw_params.max_radios)
return -EINVAL;
for (i = 0; i < wmi_ep_count; i++)
ath11k_connect_pdev_htc_service(ab, i);
return 0;
}
static void ath11k_wmi_pdev_detach(struct ath11k_base *ab, u8 pdev_id)
{
if (WARN_ON(pdev_id >= MAX_RADIOS))
return;
/* TODO: Deinit any pdev specific wmi resource */
}
int ath11k_wmi_pdev_attach(struct ath11k_base *ab,
u8 pdev_id)
{
struct ath11k_pdev_wmi *wmi_handle;
if (pdev_id >= ab->hw_params.max_radios)
return -EINVAL;
wmi_handle = &ab->wmi_ab.wmi[pdev_id];
wmi_handle->wmi_ab = &ab->wmi_ab;
ab->wmi_ab.ab = ab;
/* TODO: Init remaining resource specific to pdev */
return 0;
}
int ath11k_wmi_attach(struct ath11k_base *ab)
{
int ret;
ret = ath11k_wmi_pdev_attach(ab, 0);
if (ret)
return ret;
ab->wmi_ab.ab = ab;
ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
/* It's overwritten when service_ext_ready is handled */
if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1)
ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
/* TODO: Init remaining wmi soc resources required */
init_completion(&ab->wmi_ab.service_ready);
init_completion(&ab->wmi_ab.unified_ready);
return 0;
}
void ath11k_wmi_detach(struct ath11k_base *ab)
{
int i;
/* TODO: Deinit wmi resource specific to SOC as required */
for (i = 0; i < ab->htc.wmi_ep_count; i++)
ath11k_wmi_pdev_detach(ab, i);
ath11k_wmi_free_dbring_caps(ab);
}
int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id,
u32 filter_bitmap, bool enable)
{
struct wmi_hw_data_filter_cmd *cmd;
struct sk_buff *skb;
int len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_HW_DATA_FILTER_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->enable = enable;
/* Set all modes in case of disable */
if (cmd->enable)
cmd->hw_filter_bitmap = filter_bitmap;
else
cmd->hw_filter_bitmap = ((u32)~0U);
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"hw data filter enable %d filter_bitmap 0x%x\n",
enable, filter_bitmap);
return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
}
int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar)
{
struct wmi_wow_host_wakeup_ind *cmd;
struct sk_buff *skb;
size_t len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_wow_host_wakeup_ind *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow host wakeup ind\n");
return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
}
int ath11k_wmi_wow_enable(struct ath11k *ar)
{
struct wmi_wow_enable_cmd *cmd;
struct sk_buff *skb;
int len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_wow_enable_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ENABLE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->enable = 1;
cmd->pause_iface_config = WOW_IFACE_PAUSE_ENABLED;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow enable\n");
return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
}
int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
const u8 mac_addr[ETH_ALEN])
{
struct sk_buff *skb;
struct wmi_scan_prob_req_oui_cmd *cmd;
u32 prob_req_oui;
int len;
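/* The OUI is the first three octets of the MAC address, packed into the
* lower 24 bits.
*/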
prob_req_oui = (((u32)mac_addr[0]) << 16) |
(((u32)mac_addr[1]) << 8) | mac_addr[2];
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_scan_prob_req_oui_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_SCAN_PROB_REQ_OUI_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->prob_req_oui = prob_req_oui;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "scan prob req oui %08x\n",
prob_req_oui);
return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SCAN_PROB_REQ_OUI_CMDID);
}
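/* Worked example (illustrative): for a MAC address starting 00:03:7f,
* the packing above yields prob_req_oui = (0x00 << 16) | (0x03 << 8) |
* 0x7f = 0x00037f, i.e. only the 3-byte OUI of the address is sent to
* the firmware.
*/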
int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id,
enum wmi_wow_wakeup_event event,
u32 enable)
{
struct wmi_wow_add_del_event_cmd *cmd;
struct sk_buff *skb;
size_t len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ADD_DEL_EVT_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->is_add = enable;
cmd->event_bitmap = (1 << event);
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow add wakeup event %s enable %d vdev_id %d\n",
wow_wakeup_event(event), enable, vdev_id);
return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
}
int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id,
const u8 *pattern, const u8 *mask,
int pattern_len, int pattern_offset)
{
struct wmi_wow_add_pattern_cmd *cmd;
struct wmi_wow_bitmap_pattern *bitmap;
struct wmi_tlv *tlv;
struct sk_buff *skb;
u8 *ptr;
size_t len;
len = sizeof(*cmd) +
sizeof(*tlv) + /* array struct */
sizeof(*bitmap) + /* bitmap */
sizeof(*tlv) + /* empty ipv4 sync */
sizeof(*tlv) + /* empty ipv6 sync */
sizeof(*tlv) + /* empty magic */
sizeof(*tlv) + /* empty info timeout */
sizeof(*tlv) + sizeof(u32); /* ratelimit interval */
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
/* cmd */
ptr = (u8 *)skb->data;
cmd = (struct wmi_wow_add_pattern_cmd *)ptr;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_WOW_ADD_PATTERN_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->pattern_id = pattern_id;
cmd->pattern_type = WOW_BITMAP_PATTERN;
ptr += sizeof(*cmd);
/* bitmap */
tlv = (struct wmi_tlv *)ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_ARRAY_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap));
ptr += sizeof(*tlv);
bitmap = (struct wmi_wow_bitmap_pattern *)ptr;
bitmap->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_WOW_BITMAP_PATTERN_T) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap) - TLV_HDR_SIZE);
memcpy(bitmap->patternbuf, pattern, pattern_len);
ath11k_ce_byte_swap(bitmap->patternbuf, roundup(pattern_len, 4));
memcpy(bitmap->bitmaskbuf, mask, pattern_len);
ath11k_ce_byte_swap(bitmap->bitmaskbuf, roundup(pattern_len, 4));
bitmap->pattern_offset = pattern_offset;
bitmap->pattern_len = pattern_len;
bitmap->bitmask_len = pattern_len;
bitmap->pattern_id = pattern_id;
ptr += sizeof(*bitmap);
/* ipv4 sync */
tlv = (struct wmi_tlv *)ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_ARRAY_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, 0);
ptr += sizeof(*tlv);
/* ipv6 sync */
tlv = (struct wmi_tlv *)ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_ARRAY_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, 0);
ptr += sizeof(*tlv);
/* magic */
tlv = (struct wmi_tlv *)ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_ARRAY_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, 0);
ptr += sizeof(*tlv);
/* pattern info timeout */
tlv = (struct wmi_tlv *)ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_ARRAY_UINT32) |
FIELD_PREP(WMI_TLV_LEN, 0);
ptr += sizeof(*tlv);
/* ratelimit interval */
tlv = (struct wmi_tlv *)ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_ARRAY_UINT32) |
FIELD_PREP(WMI_TLV_LEN, sizeof(u32));
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d\n",
vdev_id, pattern_id, pattern_offset);
return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
}
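/* Illustrative sketch (not part of the driver): a caller waking the host
* on frames destined to a given MAC address could install a 6-byte bitmap
* pattern with a full mask. The offset semantics (0 here, i.e. the start
* of the frame view the firmware matches against) are an assumption for
* illustration; the firmware defines the real anchor point.
*/
static int ath11k_example_wow_wake_on_dest_mac(struct ath11k *ar,
u32 vdev_id, u32 pattern_id,
const u8 dest_mac[ETH_ALEN])
{
static const u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/* Match all six bytes of the destination address. */
return ath11k_wmi_wow_add_pattern(ar, vdev_id, pattern_id,
dest_mac, mask, ETH_ALEN, 0);
}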
int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id)
{
struct wmi_wow_del_pattern_cmd *cmd;
struct sk_buff *skb;
size_t len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_WOW_DEL_PATTERN_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->pattern_id = pattern_id;
cmd->pattern_type = WOW_BITMAP_PATTERN;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow del pattern vdev_id %d pattern_id %d\n",
vdev_id, pattern_id);
return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
}
static struct sk_buff *
ath11k_wmi_op_gen_config_pno_start(struct ath11k *ar,
u32 vdev_id,
struct wmi_pno_scan_req *pno)
{
struct nlo_configured_parameters *nlo_list;
struct wmi_wow_nlo_config_cmd *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
u32 *channel_list;
size_t len, nlo_list_len, channel_list_len;
u8 *ptr;
u32 i;
/* Besides the command itself, the message carries one TLV placeholder
* for the array of nlo_configured_parameters (nlo_list) and one TLV
* placeholder for the uint32 channel_list array.
*/
len = sizeof(*cmd) +
sizeof(*tlv) +
sizeof(*tlv);
channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
len += channel_list_len;
nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
len += nlo_list_len;
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return ERR_PTR(-ENOMEM);
ptr = (u8 *)skb->data;
cmd = (struct wmi_wow_nlo_config_cmd *)ptr;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = pno->vdev_id;
cmd->flags = WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN;
/* current FW does not support min-max range for dwell time */
cmd->active_dwell_time = pno->active_max_time;
cmd->passive_dwell_time = pno->passive_max_time;
if (pno->do_passive_scan)
cmd->flags |= WMI_NLO_CONFIG_SCAN_PASSIVE;
cmd->fast_scan_period = pno->fast_scan_period;
cmd->slow_scan_period = pno->slow_scan_period;
cmd->fast_scan_max_cycles = pno->fast_scan_max_cycles;
cmd->delay_start_time = pno->delay_start_time;
if (pno->enable_pno_scan_randomization) {
cmd->flags |= WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ;
ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
ath11k_ce_byte_swap(cmd->mac_addr.addr, 8);
ath11k_ce_byte_swap(cmd->mac_mask.addr, 8);
}
ptr += sizeof(*cmd);
/* nlo_configured_parameters(nlo_list) */
cmd->no_of_ssids = pno->uc_networks_count;
tlv = (struct wmi_tlv *)ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_ARRAY_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, nlo_list_len);
ptr += sizeof(*tlv);
nlo_list = (struct nlo_configured_parameters *)ptr;
for (i = 0; i < cmd->no_of_ssids; i++) {
tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*nlo_list) - sizeof(*tlv));
nlo_list[i].ssid.valid = true;
nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
memcpy(nlo_list[i].ssid.ssid.ssid,
pno->a_networks[i].ssid.ssid,
nlo_list[i].ssid.ssid.ssid_len);
ath11k_ce_byte_swap(nlo_list[i].ssid.ssid.ssid,
roundup(nlo_list[i].ssid.ssid.ssid_len, 4));
if (pno->a_networks[i].rssi_threshold &&
pno->a_networks[i].rssi_threshold > -300) {
nlo_list[i].rssi_cond.valid = true;
nlo_list[i].rssi_cond.rssi =
pno->a_networks[i].rssi_threshold;
}
nlo_list[i].bcast_nw_type.valid = true;
nlo_list[i].bcast_nw_type.bcast_nw_type =
pno->a_networks[i].bcast_nw_type;
}
ptr += nlo_list_len;
cmd->num_of_channels = pno->a_networks[0].channel_count;
tlv = (struct wmi_tlv *)ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
FIELD_PREP(WMI_TLV_LEN, channel_list_len);
ptr += sizeof(*tlv);
channel_list = (u32 *)ptr;
for (i = 0; i < cmd->num_of_channels; i++)
channel_list[i] = pno->a_networks[0].channels[i];
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv start pno config vdev_id %d\n",
vdev_id);
return skb;
}
static struct sk_buff *ath11k_wmi_op_gen_config_pno_stop(struct ath11k *ar,
u32 vdev_id)
{
struct wmi_wow_nlo_config_cmd *cmd;
struct sk_buff *skb;
size_t len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return ERR_PTR(-ENOMEM);
cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->flags = WMI_NLO_CONFIG_STOP;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"tlv stop pno config vdev_id %d\n", vdev_id);
return skb;
}
int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id,
struct wmi_pno_scan_req *pno_scan)
{
struct sk_buff *skb;
if (pno_scan->enable)
skb = ath11k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
else
skb = ath11k_wmi_op_gen_config_pno_stop(ar, vdev_id);
if (IS_ERR_OR_NULL(skb))
return -ENOMEM;
return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
}
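/* Illustrative sketch (not part of the driver): a minimal single-SSID,
* two-channel PNO start request. Field names follow struct
* wmi_pno_scan_req as used above; the struct may be large, so a real
* caller would likely allocate it rather than keep it on the stack, and
* the channel unit (MHz center frequency) is an assumption.
*/
static int ath11k_example_start_pno(struct ath11k *ar, u32 vdev_id,
const u8 *ssid, u8 ssid_len)
{
struct wmi_pno_scan_req pno = {};

pno.enable = 1;
pno.vdev_id = vdev_id;
pno.uc_networks_count = 1;
pno.a_networks[0].ssid.ssid_len = ssid_len;
memcpy(pno.a_networks[0].ssid.ssid, ssid, ssid_len);
pno.a_networks[0].channel_count = 2;
pno.a_networks[0].channels[0] = 2412;
pno.a_networks[0].channels[1] = 2437;

return ath11k_wmi_wow_config_pno(ar, vdev_id, &pno);
}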
static void ath11k_wmi_fill_ns_offload(struct ath11k *ar,
struct ath11k_arp_ns_offload *offload,
u8 **ptr,
bool enable,
bool ext)
{
struct wmi_ns_offload_tuple *ns;
struct wmi_tlv *tlv;
u8 *buf_ptr = *ptr;
u32 ns_cnt, ns_ext_tuples;
int i, max_offloads;
ns_cnt = offload->ipv6_count;
tlv = (struct wmi_tlv *)buf_ptr;
if (ext) {
ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, ns_ext_tuples * sizeof(*ns));
i = WMI_MAX_NS_OFFLOADS;
max_offloads = offload->ipv6_count;
} else {
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, WMI_MAX_NS_OFFLOADS * sizeof(*ns));
i = 0;
max_offloads = WMI_MAX_NS_OFFLOADS;
}
buf_ptr += sizeof(*tlv);
for (; i < max_offloads; i++) {
ns = (struct wmi_ns_offload_tuple *)buf_ptr;
ns->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NS_OFFLOAD_TUPLE) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*ns) - TLV_HDR_SIZE);
if (enable) {
if (i < ns_cnt)
ns->flags |= WMI_NSOL_FLAGS_VALID;
memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
ath11k_ce_byte_swap(ns->target_ipaddr[0], 16);
ath11k_ce_byte_swap(ns->solicitation_ipaddr, 16);
if (offload->ipv6_type[i])
ns->flags |= WMI_NSOL_FLAGS_IS_IPV6_ANYCAST;
memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
ath11k_ce_byte_swap(ns->target_mac.addr, 8);
if (ns->target_mac.word0 != 0 ||
ns->target_mac.word1 != 0) {
ns->flags |= WMI_NSOL_FLAGS_MAC_VALID;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"index %d ns_solicited %pI6 target %pI6",
i, ns->solicitation_ipaddr,
ns->target_ipaddr[0]);
}
buf_ptr += sizeof(*ns);
}
*ptr = buf_ptr;
}
static void ath11k_wmi_fill_arp_offload(struct ath11k *ar,
struct ath11k_arp_ns_offload *offload,
u8 **ptr,
bool enable)
{
struct wmi_arp_offload_tuple *arp;
struct wmi_tlv *tlv;
u8 *buf_ptr = *ptr;
int i;
/* fill arp tuple */
tlv = (struct wmi_tlv *)buf_ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
FIELD_PREP(WMI_TLV_LEN, WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
buf_ptr += sizeof(*tlv);
for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
arp = (struct wmi_arp_offload_tuple *)buf_ptr;
arp->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARP_OFFLOAD_TUPLE) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);
if (enable && i < offload->ipv4_count) {
/* Copy the target ip addr and flags */
arp->flags = WMI_ARPOL_FLAGS_VALID;
memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
ath11k_ce_byte_swap(arp->target_ipaddr, 4);
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "arp offload address %pI4",
arp->target_ipaddr);
}
buf_ptr += sizeof(*arp);
}
*ptr = buf_ptr;
}
int ath11k_wmi_arp_ns_offload(struct ath11k *ar,
struct ath11k_vif *arvif, bool enable)
{
struct ath11k_arp_ns_offload *offload;
struct wmi_set_arp_ns_offload_cmd *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
u8 *buf_ptr;
size_t len;
u8 ns_cnt, ns_ext_tuples = 0;
offload = &arvif->arp_ns_offload;
ns_cnt = offload->ipv6_count;
len = sizeof(*cmd) +
sizeof(*tlv) +
WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_tuple) +
sizeof(*tlv) +
WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_tuple);
if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
len += sizeof(*tlv) +
ns_ext_tuples * sizeof(struct wmi_ns_offload_tuple);
}
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
buf_ptr = skb->data;
cmd = (struct wmi_set_arp_ns_offload_cmd *)buf_ptr;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_SET_ARP_NS_OFFLOAD_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->flags = 0;
cmd->vdev_id = arvif->vdev_id;
cmd->num_ns_ext_tuples = ns_ext_tuples;
buf_ptr += sizeof(*cmd);
ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, false);
ath11k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);
if (ns_ext_tuples)
ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, true);
return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
}
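/* Worked example (illustrative, assuming WMI_MAX_NS_OFFLOADS == 2 as in
* wmi.h): with ipv6_count == 3, ns_ext_tuples == 1, so the message grows
* by one extra TLV header plus one wmi_ns_offload_tuple, and the second
* ath11k_wmi_fill_ns_offload() call (ext == true) fills tuples starting
* at index WMI_MAX_NS_OFFLOADS.
*/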
int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar,
struct ath11k_vif *arvif, bool enable)
{
struct wmi_gtk_rekey_offload_cmd *cmd;
struct ath11k_rekey_data *rekey_data = &arvif->rekey_data;
int len;
struct sk_buff *skb;
__le64 replay_ctr;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = arvif->vdev_id;
if (enable) {
cmd->flags = GTK_OFFLOAD_ENABLE_OPCODE;
/* the lengths in rekey_data and cmd are equal */
memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
ath11k_ce_byte_swap(cmd->kck, GTK_OFFLOAD_KEK_BYTES);
memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
ath11k_ce_byte_swap(cmd->kek, GTK_OFFLOAD_KEK_BYTES);
replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
memcpy(cmd->replay_ctr, &replay_ctr,
sizeof(replay_ctr));
ath11k_ce_byte_swap(cmd->replay_ctr, GTK_REPLAY_COUNTER_BYTES);
} else {
cmd->flags = GTK_OFFLOAD_DISABLE_OPCODE;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
arvif->vdev_id, enable);
return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
}
int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar,
struct ath11k_vif *arvif)
{
struct wmi_gtk_rekey_offload_cmd *cmd;
int len;
struct sk_buff *skb;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = arvif->vdev_id;
cmd->flags = GTK_OFFLOAD_REQUEST_STATUS_OPCODE;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
arvif->vdev_id);
return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
}
int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_val)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_pdev_set_sar_table_cmd *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
u8 *buf_ptr;
u32 len, sar_len_aligned, rsvd_len_aligned;
sar_len_aligned = roundup(BIOS_SAR_TABLE_LEN, sizeof(u32));
rsvd_len_aligned = roundup(BIOS_SAR_RSVD1_LEN, sizeof(u32));
len = sizeof(*cmd) +
TLV_HDR_SIZE + sar_len_aligned +
TLV_HDR_SIZE + rsvd_len_aligned;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_set_sar_table_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->pdev_id = ar->pdev->pdev_id;
cmd->sar_len = BIOS_SAR_TABLE_LEN;
cmd->rsvd_len = BIOS_SAR_RSVD1_LEN;
buf_ptr = skb->data + sizeof(*cmd);
tlv = (struct wmi_tlv *)buf_ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
FIELD_PREP(WMI_TLV_LEN, sar_len_aligned);
buf_ptr += TLV_HDR_SIZE;
memcpy(buf_ptr, sar_val, BIOS_SAR_TABLE_LEN);
buf_ptr += sar_len_aligned;
tlv = (struct wmi_tlv *)buf_ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);
return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
}
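/* Worked example (illustrative): if BIOS_SAR_TABLE_LEN were 22, the
* roundup() above pads the byte-array TLV body to 24 so the following
* reserved TLV stays 4-byte aligned; WMI_TLV_LEN carries the padded size
* while cmd->sar_len still reports the unpadded 22.
*/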
int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_pdev_set_geo_table_cmd *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
u8 *buf_ptr;
u32 len, rsvd_len_aligned;
rsvd_len_aligned = roundup(BIOS_SAR_RSVD2_LEN, sizeof(u32));
len = sizeof(*cmd) + TLV_HDR_SIZE + rsvd_len_aligned;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_set_geo_table_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->pdev_id = ar->pdev->pdev_id;
cmd->rsvd_len = BIOS_SAR_RSVD2_LEN;
buf_ptr = skb->data + sizeof(*cmd);
tlv = (struct wmi_tlv *)buf_ptr;
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);
return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
}
int ath11k_wmi_sta_keepalive(struct ath11k *ar,
const struct wmi_sta_keepalive_arg *arg)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_sta_keepalive_cmd *cmd;
struct wmi_sta_keepalive_arp_resp *arp;
struct sk_buff *skb;
size_t len;
len = sizeof(*cmd) + sizeof(*arp);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_STA_KEEPALIVE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = arg->vdev_id;
cmd->enabled = arg->enabled;
cmd->interval = arg->interval;
cmd->method = arg->method;
arp = (struct wmi_sta_keepalive_arp_resp *)(cmd + 1);
arp->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_STA_KEEPALIVE_ARP_RESPONSE) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);
if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
arp->src_ip4_addr = arg->src_ip4_addr;
arp->dest_ip4_addr = arg->dest_ip4_addr;
ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"sta keepalive vdev %d enabled %d method %d interval %d\n",
arg->vdev_id, arg->enabled, arg->method, arg->interval);
return ath11k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
}
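/* Illustrative sketch (not part of the driver): enabling a periodic
* NULL-frame keepalive every 30 seconds on a vdev.
* WMI_STA_KEEPALIVE_METHOD_NULL_FRAME is assumed to be the null-data
* method enumerator from wmi.h, and the interval unit (seconds) is the
* assumed firmware convention.
*/
static int ath11k_example_sta_keepalive(struct ath11k *ar, u32 vdev_id)
{
struct wmi_sta_keepalive_arg arg = {
.vdev_id = vdev_id,
.enabled = 1,
.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
.interval = 30,
};

return ath11k_wmi_sta_keepalive(ar, &arg);
}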
|
linux-master
|
drivers/net/wireless/ath/ath11k/wmi.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
*/
#include <linux/vmalloc.h>
#include "debugfs.h"
#include "core.h"
#include "debug.h"
#include "wmi.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "debugfs_htt_stats.h"
#include "peer.h"
#include "hif.h"
static const char *htt_bp_umac_ring[HTT_SW_UMAC_RING_IDX_MAX] = {
"REO2SW1_RING",
"REO2SW2_RING",
"REO2SW3_RING",
"REO2SW4_RING",
"WBM2REO_LINK_RING",
"REO2TCL_RING",
"REO2FW_RING",
"RELEASE_RING",
"PPE_RELEASE_RING",
"TCL2TQM_RING",
"TQM_RELEASE_RING",
"REO_RELEASE_RING",
"WBM2SW0_RELEASE_RING",
"WBM2SW1_RELEASE_RING",
"WBM2SW2_RELEASE_RING",
"WBM2SW3_RELEASE_RING",
"REO_CMD_RING",
"REO_STATUS_RING",
};
static const char *htt_bp_lmac_ring[HTT_SW_LMAC_RING_IDX_MAX] = {
"FW2RXDMA_BUF_RING",
"FW2RXDMA_STATUS_RING",
"FW2RXDMA_LINK_RING",
"SW2RXDMA_BUF_RING",
"WBM2RXDMA_LINK_RING",
"RXDMA2FW_RING",
"RXDMA2SW_RING",
"RXDMA2RELEASE_RING",
"RXDMA2REO_RING",
"MONITOR_STATUS_RING",
"MONITOR_BUF_RING",
"MONITOR_DESC_RING",
"MONITOR_DEST_RING",
};
void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
enum wmi_direct_buffer_module id,
enum ath11k_dbg_dbr_event event,
struct hal_srng *srng)
{
struct ath11k_debug_dbr *dbr_debug;
struct ath11k_dbg_dbr_data *dbr_data;
struct ath11k_dbg_dbr_entry *entry;
if (id >= WMI_DIRECT_BUF_MAX || event >= ATH11K_DBG_DBR_EVENT_MAX)
return;
dbr_debug = ar->debug.dbr_debug[id];
if (!dbr_debug)
return;
if (!dbr_debug->dbr_debug_enabled)
return;
dbr_data = &dbr_debug->dbr_dbg_data;
spin_lock_bh(&dbr_data->lock);
if (dbr_data->entries) {
entry = &dbr_data->entries[dbr_data->dbr_debug_idx];
entry->hp = srng->u.src_ring.hp;
entry->tp = *srng->u.src_ring.tp_addr;
entry->timestamp = jiffies;
entry->event = event;
dbr_data->dbr_debug_idx++;
if (dbr_data->dbr_debug_idx ==
dbr_data->num_ring_debug_entries)
dbr_data->dbr_debug_idx = 0;
}
spin_unlock_bh(&dbr_data->lock);
}
static void ath11k_debugfs_fw_stats_reset(struct ath11k *ar)
{
spin_lock_bh(&ar->data_lock);
ar->fw_stats_done = false;
ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs);
spin_unlock_bh(&ar->data_lock);
}
void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_pdev *pdev;
bool is_end;
static unsigned int num_vdev, num_bcn;
size_t total_vdevs_started = 0;
int i;
/* WMI_REQUEST_PDEV_STAT request has already been processed */
if (stats->stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
ar->fw_stats_done = true;
return;
}
if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
if (list_empty(&stats->vdevs)) {
ath11k_warn(ab, "empty vdev stats");
return;
}
/* FW sends stats for all active VDEVs irrespective of PDEV,
* hence accumulate until the count of all started VDEVs is reached
*/
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
if (pdev && pdev->ar)
total_vdevs_started += pdev->ar->num_started_vdevs;
}
is_end = ((++num_vdev) == total_vdevs_started);
list_splice_tail_init(&stats->vdevs,
&ar->fw_stats.vdevs);
if (is_end) {
ar->fw_stats_done = true;
num_vdev = 0;
}
return;
}
if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
if (list_empty(&stats->bcn)) {
ath11k_warn(ab, "empty bcn stats");
return;
}
/* Mark the end once we have received stats for all started VDEVs
* within the PDEV
*/
is_end = ((++num_bcn) == ar->num_started_vdevs);
list_splice_tail_init(&stats->bcn,
&ar->fw_stats.bcn);
if (is_end) {
ar->fw_stats_done = true;
num_bcn = 0;
}
}
}
static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
struct stats_request_params *req_param)
{
struct ath11k_base *ab = ar->ab;
unsigned long timeout, time_left;
int ret;
lockdep_assert_held(&ar->conf_mutex);
/* FW stats can get split when exceeding the stats data buffer limit.
* In that case, since there is no end marker for the back-to-back
* received 'update stats' events, we keep a 3 second timeout in case
* fw_stats_done is not marked yet
*/
timeout = jiffies + msecs_to_jiffies(3 * 1000);
ath11k_debugfs_fw_stats_reset(ar);
reinit_completion(&ar->fw_stats_complete);
ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
if (ret) {
ath11k_warn(ab, "could not request fw stats (%d)\n",
ret);
return ret;
}
time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
if (!time_left)
return -ETIMEDOUT;
for (;;) {
if (time_after(jiffies, timeout))
break;
spin_lock_bh(&ar->data_lock);
if (ar->fw_stats_done) {
spin_unlock_bh(&ar->data_lock);
break;
}
spin_unlock_bh(&ar->data_lock);
}
return 0;
}
int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id,
u32 vdev_id, u32 stats_id)
{
struct ath11k_base *ab = ar->ab;
struct stats_request_params req_param;
int ret;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON) {
ret = -ENETDOWN;
goto err_unlock;
}
req_param.pdev_id = pdev_id;
req_param.vdev_id = vdev_id;
req_param.stats_id = stats_id;
ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
if (ret)
ath11k_warn(ab, "failed to request fw stats: %d\n", ret);
ath11k_dbg(ab, ATH11K_DBG_WMI,
"debug get fw stat pdev id %d vdev id %d stats id 0x%x\n",
pdev_id, vdev_id, stats_id);
err_unlock:
mutex_unlock(&ar->conf_mutex);
return ret;
}
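/* Illustrative sketch (not part of the driver): requesting pdev stats for
* a radio from other driver code; vdev_id is ignored for pdev stats, so 0
* is passed.
*/
static int ath11k_example_request_pdev_stats(struct ath11k *ar)
{
return ath11k_debugfs_get_fw_stats(ar, ar->pdev->pdev_id, 0,
WMI_REQUEST_PDEV_STAT);
}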
static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
{
struct ath11k *ar = inode->i_private;
struct ath11k_base *ab = ar->ab;
struct stats_request_params req_param;
void *buf = NULL;
int ret;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON) {
ret = -ENETDOWN;
goto err_unlock;
}
buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
if (!buf) {
ret = -ENOMEM;
goto err_unlock;
}
req_param.pdev_id = ar->pdev->pdev_id;
req_param.vdev_id = 0;
req_param.stats_id = WMI_REQUEST_PDEV_STAT;
ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
if (ret) {
ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
goto err_free;
}
ath11k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id, buf);
file->private_data = buf;
mutex_unlock(&ar->conf_mutex);
return 0;
err_free:
vfree(buf);
err_unlock:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static int ath11k_release_pdev_stats(struct inode *inode, struct file *file)
{
vfree(file->private_data);
return 0;
}
static ssize_t ath11k_read_pdev_stats(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
const char *buf = file->private_data;
size_t len = strlen(buf);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_pdev_stats = {
.open = ath11k_open_pdev_stats,
.release = ath11k_release_pdev_stats,
.read = ath11k_read_pdev_stats,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static int ath11k_open_vdev_stats(struct inode *inode, struct file *file)
{
struct ath11k *ar = inode->i_private;
struct stats_request_params req_param;
void *buf = NULL;
int ret;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON) {
ret = -ENETDOWN;
goto err_unlock;
}
buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
if (!buf) {
ret = -ENOMEM;
goto err_unlock;
}
req_param.pdev_id = ar->pdev->pdev_id;
/* VDEV stats are always sent for all active VDEVs by the FW */
req_param.vdev_id = 0;
req_param.stats_id = WMI_REQUEST_VDEV_STAT;
ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
if (ret) {
ath11k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
goto err_free;
}
ath11k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id, buf);
file->private_data = buf;
mutex_unlock(&ar->conf_mutex);
return 0;
err_free:
vfree(buf);
err_unlock:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static int ath11k_release_vdev_stats(struct inode *inode, struct file *file)
{
vfree(file->private_data);
return 0;
}
static ssize_t ath11k_read_vdev_stats(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
const char *buf = file->private_data;
size_t len = strlen(buf);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_vdev_stats = {
.open = ath11k_open_vdev_stats,
.release = ath11k_release_vdev_stats,
.read = ath11k_read_vdev_stats,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static int ath11k_open_bcn_stats(struct inode *inode, struct file *file)
{
struct ath11k *ar = inode->i_private;
struct ath11k_vif *arvif;
struct stats_request_params req_param;
void *buf = NULL;
int ret;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON) {
ret = -ENETDOWN;
goto err_unlock;
}
buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
if (!buf) {
ret = -ENOMEM;
goto err_unlock;
}
req_param.stats_id = WMI_REQUEST_BCN_STAT;
req_param.pdev_id = ar->pdev->pdev_id;
/* loop all active VDEVs for bcn stats */
list_for_each_entry(arvif, &ar->arvifs, list) {
if (!arvif->is_up)
continue;
req_param.vdev_id = arvif->vdev_id;
ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
if (ret) {
ath11k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
goto err_free;
}
}
ath11k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id, buf);
/* since the beacon stats request is looped over all active VDEVs, the
* saved fw stats are not freed per request until all active VDEVs are
* done
*/
spin_lock_bh(&ar->data_lock);
ath11k_fw_stats_bcn_free(&ar->fw_stats.bcn);
spin_unlock_bh(&ar->data_lock);
file->private_data = buf;
mutex_unlock(&ar->conf_mutex);
return 0;
err_free:
vfree(buf);
err_unlock:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static int ath11k_release_bcn_stats(struct inode *inode, struct file *file)
{
vfree(file->private_data);
return 0;
}
static ssize_t ath11k_read_bcn_stats(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
const char *buf = file->private_data;
size_t len = strlen(buf);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_bcn_stats = {
.open = ath11k_open_bcn_stats,
.release = ath11k_release_bcn_stats,
.read = ath11k_read_bcn_stats,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath11k_read_simulate_fw_crash(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
const char buf[] =
"To simulate firmware crash write one of the keywords to this file:\n"
"`assert` - this will send WMI_FORCE_FW_HANG_CMDID to firmware to cause assert.\n"
"`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n";
return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
}
/* Simulate firmware crash:
* 'assert': send WMI_FORCE_FW_HANG_CMDID to the firmware to trigger an
* assert; this hang is recoverable by a firmware restart.
* 'hw-restart': simply queue a hw restart without the firmware or the
* hardware actually crashing.
*/
static ssize_t ath11k_write_simulate_fw_crash(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k_base *ab = file->private_data;
struct ath11k_pdev *pdev;
struct ath11k *ar = ab->pdevs[0].ar;
char buf[32] = {0};
ssize_t rc;
int i, ret, radioup = 0;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (ar && ar->state == ATH11K_STATE_ON) {
radioup = 1;
break;
}
}
/* filter partial writes and invalid commands */
if (*ppos != 0 || count >= sizeof(buf) || count == 0)
return -EINVAL;
rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
if (rc < 0)
return rc;
/* drop the possible '\n' from the end */
if (buf[*ppos - 1] == '\n')
buf[*ppos - 1] = '\0';
if (radioup == 0) {
ret = -ENETDOWN;
goto exit;
}
if (!strcmp(buf, "assert")) {
ath11k_info(ab, "simulating firmware assert crash\n");
ret = ath11k_wmi_force_fw_hang_cmd(ar,
ATH11K_WMI_FW_HANG_ASSERT_TYPE,
ATH11K_WMI_FW_HANG_DELAY);
} else if (!strcmp(buf, "hw-restart")) {
ath11k_info(ab, "user requested hw restart\n");
queue_work(ab->workqueue_aux, &ab->reset_work);
ret = 0;
} else {
ret = -EINVAL;
goto exit;
}
if (ret) {
ath11k_warn(ab, "failed to simulate firmware crash: %d\n", ret);
goto exit;
}
ret = count;
exit:
return ret;
}
static const struct file_operations fops_simulate_fw_crash = {
.read = ath11k_read_simulate_fw_crash,
.write = ath11k_write_simulate_fw_crash,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
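/* Usage example (illustrative): from userspace,
*   echo assert > /sys/kernel/debug/ath11k/<soc>/simulate_fw_crash
* sends WMI_FORCE_FW_HANG_CMDID to the firmware, while
*   echo hw-restart > .../simulate_fw_crash
* only queues a hw restart. The exact <soc> directory name is whatever
* ath11k_debugfs_soc_create() below composes from the bus and device.
*/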
static ssize_t ath11k_write_enable_extd_tx_stats(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
u32 filter;
int ret;
if (kstrtouint_from_user(ubuf, count, 0, &filter))
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON) {
ret = -ENETDOWN;
goto out;
}
if (filter == ar->debug.extd_tx_stats) {
ret = count;
goto out;
}
ar->debug.extd_tx_stats = filter;
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static ssize_t ath11k_read_enable_extd_tx_stats(struct file *file,
char __user *ubuf,
size_t count, loff_t *ppos)
{
char buf[32] = {0};
struct ath11k *ar = file->private_data;
int len = 0;
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
ar->debug.extd_tx_stats);
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}
static const struct file_operations fops_extd_tx_stats = {
.read = ath11k_read_enable_extd_tx_stats,
.write = ath11k_write_enable_extd_tx_stats,
.open = simple_open
};
static ssize_t ath11k_write_extd_rx_stats(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
struct ath11k_base *ab = ar->ab;
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 enable, rx_filter = 0, ring_id;
int i;
int ret;
if (kstrtouint_from_user(ubuf, count, 0, &enable))
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON) {
ret = -ENETDOWN;
goto exit;
}
if (enable > 1) {
ret = -EINVAL;
goto exit;
}
if (enable == ar->debug.extd_rx_stats) {
ret = count;
goto exit;
}
if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) {
ar->debug.extd_rx_stats = enable;
ret = count;
goto exit;
}
if (enable) {
rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START;
rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START;
rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END;
rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS;
rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT;
rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE;
tlv_filter.rx_filter = rx_filter;
tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
HTT_RX_FP_DATA_FILTER_FLASG3;
} else {
tlv_filter = ath11k_mac_mon_status_filter_default;
}
ar->debug.rx_filter = tlv_filter.rx_filter;
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
HAL_RXDMA_MONITOR_STATUS,
DP_RX_BUFFER_SIZE, &tlv_filter);
if (ret) {
ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
goto exit;
}
}
ar->debug.extd_rx_stats = enable;
ret = count;
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static ssize_t ath11k_read_extd_rx_stats(struct file *file,
char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
char buf[32];
int len = 0;
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf) - len, "%d\n",
ar->debug.extd_rx_stats);
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}
static const struct file_operations fops_extd_rx_stats = {
.read = ath11k_read_extd_rx_stats,
.write = ath11k_write_extd_rx_stats,
.open = simple_open,
};
static int ath11k_fill_bp_stats(struct ath11k_base *ab,
struct ath11k_bp_stats *bp_stats,
char *buf, int len, int size)
{
lockdep_assert_held(&ab->base_lock);
len += scnprintf(buf + len, size - len, "count: %u\n",
bp_stats->count);
len += scnprintf(buf + len, size - len, "hp: %u\n",
bp_stats->hp);
len += scnprintf(buf + len, size - len, "tp: %u\n",
bp_stats->tp);
len += scnprintf(buf + len, size - len, "seen before: %ums\n\n",
jiffies_to_msecs(jiffies - bp_stats->jiffies));
return len;
}
static ssize_t ath11k_debugfs_dump_soc_ring_bp_stats(struct ath11k_base *ab,
char *buf, int size)
{
struct ath11k_bp_stats *bp_stats;
bool stats_rxd = false;
u8 i, pdev_idx;
int len = 0;
len += scnprintf(buf + len, size - len, "\nBackpressure Stats\n");
len += scnprintf(buf + len, size - len, "==================\n");
spin_lock_bh(&ab->base_lock);
for (i = 0; i < HTT_SW_UMAC_RING_IDX_MAX; i++) {
bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[i];
if (!bp_stats->count)
continue;
len += scnprintf(buf + len, size - len, "Ring: %s\n",
htt_bp_umac_ring[i]);
len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size);
stats_rxd = true;
}
for (i = 0; i < HTT_SW_LMAC_RING_IDX_MAX; i++) {
for (pdev_idx = 0; pdev_idx < MAX_RADIOS; pdev_idx++) {
bp_stats =
&ab->soc_stats.bp_stats.lmac_ring_bp_stats[i][pdev_idx];
if (!bp_stats->count)
continue;
len += scnprintf(buf + len, size - len, "Ring: %s\n",
htt_bp_lmac_ring[i]);
len += scnprintf(buf + len, size - len, "pdev: %d\n",
pdev_idx);
len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size);
stats_rxd = true;
}
}
spin_unlock_bh(&ab->base_lock);
if (!stats_rxd)
len += scnprintf(buf + len, size - len,
"No Ring Backpressure stats received\n\n");
return len;
}
static ssize_t ath11k_debugfs_dump_soc_dp_stats(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k_base *ab = file->private_data;
struct ath11k_soc_dp_stats *soc_stats = &ab->soc_stats;
int len = 0, i, retval;
const int size = 4096;
static const char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = {
"Overflow", "MPDU len", "FCS", "Decrypt", "TKIP MIC",
"Unencrypt", "MSDU len", "MSDU limit", "WiFi parse",
"AMSDU parse", "SA timeout", "DA timeout",
"Flow timeout", "Flush req"};
static const char *reo_err[HAL_REO_DEST_RING_ERROR_CODE_MAX] = {
"Desc addr zero", "Desc inval", "AMPDU in non BA",
"Non BA dup", "BA dup", "Frame 2k jump", "BAR 2k jump",
"Frame OOR", "BAR OOR", "No BA session",
"Frame SN equal SSN", "PN check fail", "2k err",
"PN err", "Desc blocked"};
char *buf;
buf = kzalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
len += scnprintf(buf + len, size - len, "SOC RX STATS:\n\n");
len += scnprintf(buf + len, size - len, "err ring pkts: %u\n",
soc_stats->err_ring_pkts);
len += scnprintf(buf + len, size - len, "Invalid RBM: %u\n\n",
soc_stats->invalid_rbm);
len += scnprintf(buf + len, size - len, "RXDMA errors:\n");
for (i = 0; i < HAL_REO_ENTR_RING_RXDMA_ECODE_MAX; i++)
len += scnprintf(buf + len, size - len, "%s: %u\n",
rxdma_err[i], soc_stats->rxdma_error[i]);
len += scnprintf(buf + len, size - len, "\nREO errors:\n");
for (i = 0; i < HAL_REO_DEST_RING_ERROR_CODE_MAX; i++)
len += scnprintf(buf + len, size - len, "%s: %u\n",
reo_err[i], soc_stats->reo_error[i]);
len += scnprintf(buf + len, size - len, "\nHAL REO errors:\n");
len += scnprintf(buf + len, size - len,
"ring0: %u\nring1: %u\nring2: %u\nring3: %u\n",
soc_stats->hal_reo_error[0],
soc_stats->hal_reo_error[1],
soc_stats->hal_reo_error[2],
soc_stats->hal_reo_error[3]);
len += scnprintf(buf + len, size - len, "\nSOC TX STATS:\n");
len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n");
for (i = 0; i < ab->hw_params.max_tx_ring; i++)
len += scnprintf(buf + len, size - len, "ring%d: %u\n",
i, soc_stats->tx_err.desc_na[i]);
len += scnprintf(buf + len, size - len,
"\nMisc Transmit Failures: %d\n",
atomic_read(&soc_stats->tx_err.misc_fail));
len += ath11k_debugfs_dump_soc_ring_bp_stats(ab, buf + len, size - len);
if (len > size)
len = size;
retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
return retval;
}
static const struct file_operations fops_soc_dp_stats = {
.read = ath11k_debugfs_dump_soc_dp_stats,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath11k_write_fw_dbglog(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
char buf[128] = {0};
struct ath11k_fw_dbglog dbglog;
unsigned int param, mod_id_index, is_end;
u64 value;
int ret, num;
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
if (ret <= 0)
return ret;
num = sscanf(buf, "%u %llx %u %u", ¶m, &value, &mod_id_index, &is_end);
if (num < 2)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (param == WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP ||
param == WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP) {
if (num != 4 || mod_id_index > (MAX_MODULE_ID_BITMAP_WORDS - 1)) {
ret = -EINVAL;
goto out;
}
ar->debug.module_id_bitmap[mod_id_index] = upper_32_bits(value);
if (!is_end) {
ret = count;
goto out;
}
} else {
if (num != 2) {
ret = -EINVAL;
goto out;
}
}
dbglog.param = param;
dbglog.value = lower_32_bits(value);
ret = ath11k_wmi_fw_dbglog_cfg(ar, ar->debug.module_id_bitmap, &dbglog);
if (ret) {
ath11k_warn(ar->ab, "fw dbglog config failed from debugfs: %d\n",
ret);
goto out;
}
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_fw_dbglog = {
.write = ath11k_write_fw_dbglog,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
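/* Usage example (illustrative): the file accepts "param value" with the
* value parsed as hex, or "param value mod_id_index is_end" for the
* module-bitmap params, e.g.
*   echo "<param> 0xffffffff" > .../fw_dbglog_config
* where the numeric <param> values come from the WMI_DEBUG_LOG_PARAM_*
* enum in wmi.h.
*/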
static int ath11k_open_sram_dump(struct inode *inode, struct file *file)
{
struct ath11k_base *ab = inode->i_private;
u8 *buf;
u32 start, end;
int ret;
start = ab->hw_params.sram_dump.start;
end = ab->hw_params.sram_dump.end;
buf = vmalloc(end - start + 1);
if (!buf)
return -ENOMEM;
ret = ath11k_hif_read(ab, buf, start, end);
if (ret) {
ath11k_warn(ab, "failed to dump sram: %d\n", ret);
vfree(buf);
return ret;
}
file->private_data = buf;
return 0;
}
static ssize_t ath11k_read_sram_dump(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k_base *ab = file->f_inode->i_private;
const char *buf = file->private_data;
int len;
u32 start, end;
start = ab->hw_params.sram_dump.start;
end = ab->hw_params.sram_dump.end;
len = end - start + 1;
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static int ath11k_release_sram_dump(struct inode *inode, struct file *file)
{
vfree(file->private_data);
file->private_data = NULL;
return 0;
}
static const struct file_operations fops_sram_dump = {
.open = ath11k_open_sram_dump,
.read = ath11k_read_sram_dump,
.release = ath11k_release_sram_dump,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
{
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
&fops_simulate_fw_crash);
debugfs_create_file("soc_dp_stats", 0600, ab->debugfs_soc, ab,
&fops_soc_dp_stats);
if (ab->hw_params.sram_dump.start != 0)
debugfs_create_file("sram", 0400, ab->debugfs_soc, ab,
&fops_sram_dump);
return 0;
}
void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab)
{
debugfs_remove_recursive(ab->debugfs_soc);
ab->debugfs_soc = NULL;
}
int ath11k_debugfs_soc_create(struct ath11k_base *ab)
{
struct dentry *root;
bool dput_needed;
char name[64];
int ret;
root = debugfs_lookup("ath11k", NULL);
if (!root) {
root = debugfs_create_dir("ath11k", NULL);
if (IS_ERR_OR_NULL(root))
return PTR_ERR(root);
dput_needed = false;
} else {
/* a dentry from debugfs_lookup() must be dput() once we are done with it */
dput_needed = true;
}
scnprintf(name, sizeof(name), "%s-%s", ath11k_bus_str(ab->hif.bus),
dev_name(ab->dev));
ab->debugfs_soc = debugfs_create_dir(name, root);
if (IS_ERR_OR_NULL(ab->debugfs_soc)) {
ret = PTR_ERR(ab->debugfs_soc);
goto out;
}
ret = 0;
out:
if (dput_needed)
dput(root);
return ret;
}
void ath11k_debugfs_soc_destroy(struct ath11k_base *ab)
{
debugfs_remove_recursive(ab->debugfs_soc);
ab->debugfs_soc = NULL;
/* We are not removing ath11k directory on purpose, even if it
* would be empty. This simplifies the directory handling and it's
* a minor cosmetic issue to leave an empty ath11k directory to
* debugfs.
*/
}
EXPORT_SYMBOL(ath11k_debugfs_soc_destroy);
void ath11k_debugfs_fw_stats_init(struct ath11k *ar)
{
struct dentry *fwstats_dir = debugfs_create_dir("fw_stats",
ar->debug.debugfs_pdev);
ar->fw_stats.debugfs_fwstats = fwstats_dir;
/* all stats debugfs files are created under the per-PDEV "fw_stats"
* directory
*/
debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar,
&fops_pdev_stats);
debugfs_create_file("vdev_stats", 0600, fwstats_dir, ar,
&fops_vdev_stats);
debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar,
&fops_bcn_stats);
}
static ssize_t ath11k_write_pktlog_filter(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
struct ath11k_base *ab = ar->ab;
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 rx_filter = 0, ring_id, filter, mode;
u8 buf[128] = {0};
int i, ret, rx_buf_sz = 0;
ssize_t rc;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON) {
ret = -ENETDOWN;
goto out;
}
rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
if (rc < 0) {
ret = rc;
goto out;
}
buf[rc] = '\0';
ret = sscanf(buf, "0x%x %u", &filter, &mode);
if (ret != 2) {
ret = -EINVAL;
goto out;
}
if (filter) {
ret = ath11k_wmi_pdev_pktlog_enable(ar, filter);
if (ret) {
ath11k_warn(ar->ab,
"failed to enable pktlog filter %x: %d\n",
ar->debug.pktlog_filter, ret);
goto out;
}
} else {
ret = ath11k_wmi_pdev_pktlog_disable(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to disable pktlog: %d\n", ret);
goto out;
}
}
/* Clear rx filter set for monitor mode and rx status */
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
HAL_RXDMA_MONITOR_STATUS,
rx_buf_sz, &tlv_filter);
if (ret) {
ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
goto out;
}
}
#define HTT_RX_FILTER_TLV_LITE_MODE \
(HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \
HTT_RX_FILTER_TLV_FLAGS_MPDU_START)
if (mode == ATH11K_PKTLOG_MODE_FULL) {
rx_filter = HTT_RX_FILTER_TLV_LITE_MODE |
HTT_RX_FILTER_TLV_FLAGS_MSDU_START |
HTT_RX_FILTER_TLV_FLAGS_MSDU_END |
HTT_RX_FILTER_TLV_FLAGS_MPDU_END |
HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER |
HTT_RX_FILTER_TLV_FLAGS_ATTENTION;
rx_buf_sz = DP_RX_BUFFER_SIZE;
} else if (mode == ATH11K_PKTLOG_MODE_LITE) {
ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
HTT_PPDU_STATS_TAG_PKTLOG);
if (ret) {
ath11k_err(ar->ab, "failed to enable pktlog lite: %d\n", ret);
goto out;
}
rx_filter = HTT_RX_FILTER_TLV_LITE_MODE;
rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
} else {
rx_buf_sz = DP_RX_BUFFER_SIZE;
tlv_filter = ath11k_mac_mon_status_filter_default;
rx_filter = tlv_filter.rx_filter;
ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
HTT_PPDU_STATS_TAG_DEFAULT);
if (ret) {
ath11k_err(ar->ab, "failed to send htt ppdu stats req: %d\n",
ret);
goto out;
}
}
tlv_filter.rx_filter = rx_filter;
if (rx_filter) {
tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
HTT_RX_FP_DATA_FILTER_FLASG3;
}
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
ar->dp.mac_id + i,
HAL_RXDMA_MONITOR_STATUS,
rx_buf_sz, &tlv_filter);
if (ret) {
ath11k_warn(ab, "failed to set rx filter for monitor status ring\n");
goto out;
}
}
ath11k_info(ab, "pktlog mode %s\n",
((mode == ATH11K_PKTLOG_MODE_FULL) ? "full" : "lite"));
ar->debug.pktlog_filter = filter;
ar->debug.pktlog_mode = mode;
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static ssize_t ath11k_read_pktlog_filter(struct file *file,
char __user *ubuf,
size_t count, loff_t *ppos)
{
char buf[32] = {0};
struct ath11k *ar = file->private_data;
int len = 0;
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf) - len, "%08x %08x\n",
ar->debug.pktlog_filter,
ar->debug.pktlog_mode);
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}
static const struct file_operations fops_pktlog_filter = {
.read = ath11k_read_pktlog_filter,
.write = ath11k_write_pktlog_filter,
.open = simple_open
};
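/* Usage example (illustrative): writing "0xffff 1" sets pktlog filter
* 0xffff with mode 1; which of ATH11K_PKTLOG_MODE_FULL and
* ATH11K_PKTLOG_MODE_LITE the numeric mode selects is defined by the
* driver's pktlog mode enum, and writing "0 0" disables pktlog.
*/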
static ssize_t ath11k_write_simulate_radar(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
int ret;
ret = ath11k_wmi_simulate_radar(ar);
if (ret)
return ret;
return count;
}
static const struct file_operations fops_simulate_radar = {
.write = ath11k_write_simulate_radar,
.open = simple_open
};
static ssize_t ath11k_debug_dump_dbr_entries(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k_dbg_dbr_data *dbr_dbg_data = file->private_data;
static const char * const event_id_to_string[] = {"empty", "Rx", "Replenish"};
int size = ATH11K_DEBUG_DBR_ENTRIES_MAX * 100;
char *buf;
int i, ret;
int len = 0;
buf = kzalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
len += scnprintf(buf + len, size - len,
"-----------------------------------------\n");
len += scnprintf(buf + len, size - len,
"| idx | hp | tp | timestamp | event |\n");
len += scnprintf(buf + len, size - len,
"-----------------------------------------\n");
spin_lock_bh(&dbr_dbg_data->lock);
for (i = 0; i < dbr_dbg_data->num_ring_debug_entries; i++) {
len += scnprintf(buf + len, size - len,
"|%4u|%8u|%8u|%11llu|%8s|\n", i,
dbr_dbg_data->entries[i].hp,
dbr_dbg_data->entries[i].tp,
dbr_dbg_data->entries[i].timestamp,
event_id_to_string[dbr_dbg_data->entries[i].event]);
}
spin_unlock_bh(&dbr_dbg_data->lock);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
return ret;
}
static const struct file_operations fops_debug_dump_dbr_entries = {
.read = ath11k_debug_dump_dbr_entries,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static void ath11k_debugfs_dbr_dbg_destroy(struct ath11k *ar, int dbr_id)
{
struct ath11k_debug_dbr *dbr_debug;
struct ath11k_dbg_dbr_data *dbr_dbg_data;
if (!ar->debug.dbr_debug[dbr_id])
return;
dbr_debug = ar->debug.dbr_debug[dbr_id];
dbr_dbg_data = &dbr_debug->dbr_dbg_data;
debugfs_remove_recursive(dbr_debug->dbr_debugfs);
kfree(dbr_dbg_data->entries);
kfree(dbr_debug);
ar->debug.dbr_debug[dbr_id] = NULL;
}
static int ath11k_debugfs_dbr_dbg_init(struct ath11k *ar, int dbr_id)
{
struct ath11k_debug_dbr *dbr_debug;
struct ath11k_dbg_dbr_data *dbr_dbg_data;
static const char * const dbr_id_to_str[] = {"spectral", "CFR"};
if (ar->debug.dbr_debug[dbr_id])
return 0;
ar->debug.dbr_debug[dbr_id] = kzalloc(sizeof(*dbr_debug),
GFP_KERNEL);
if (!ar->debug.dbr_debug[dbr_id])
return -ENOMEM;
dbr_debug = ar->debug.dbr_debug[dbr_id];
dbr_dbg_data = &dbr_debug->dbr_dbg_data;
if (dbr_debug->dbr_debugfs)
return 0;
dbr_debug->dbr_debugfs = debugfs_create_dir(dbr_id_to_str[dbr_id],
ar->debug.debugfs_pdev);
if (IS_ERR_OR_NULL(dbr_debug->dbr_debugfs)) {
if (IS_ERR(dbr_debug->dbr_debugfs))
return PTR_ERR(dbr_debug->dbr_debugfs);
return -ENOMEM;
}
dbr_debug->dbr_debug_enabled = true;
dbr_dbg_data->num_ring_debug_entries = ATH11K_DEBUG_DBR_ENTRIES_MAX;
dbr_dbg_data->dbr_debug_idx = 0;
dbr_dbg_data->entries = kcalloc(ATH11K_DEBUG_DBR_ENTRIES_MAX,
sizeof(struct ath11k_dbg_dbr_entry),
GFP_KERNEL);
if (!dbr_dbg_data->entries)
return -ENOMEM;
spin_lock_init(&dbr_dbg_data->lock);
debugfs_create_file("dump_dbr_debug", 0444, dbr_debug->dbr_debugfs,
dbr_dbg_data, &fops_debug_dump_dbr_entries);
return 0;
}
static ssize_t ath11k_debugfs_write_enable_dbr_dbg(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
char buf[32] = {0};
u32 dbr_id, enable;
int ret;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON) {
ret = -ENETDOWN;
goto out;
}
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
if (ret < 0)
goto out;
buf[ret] = '\0';
ret = sscanf(buf, "%u %u", &dbr_id, &enable);
if (ret != 2 || dbr_id > 1 || enable > 1) {
ret = -EINVAL;
ath11k_warn(ar->ab, "usage: echo <dbr_id> <val> dbr_id:0-Spectral 1-CFR val:0-disable 1-enable\n");
goto out;
}
if (enable) {
ret = ath11k_debugfs_dbr_dbg_init(ar, dbr_id);
if (ret) {
ath11k_warn(ar->ab, "db ring module debugfs init failed: %d\n",
ret);
goto out;
}
} else {
ath11k_debugfs_dbr_dbg_destroy(ar, dbr_id);
}
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_dbr_debug = {
.write = ath11k_debugfs_write_enable_dbr_dbg,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath11k_write_ps_timekeeper_enable(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
ssize_t ret;
u8 ps_timekeeper_enable;
if (kstrtou8_from_user(user_buf, count, 0, &ps_timekeeper_enable))
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON) {
ret = -ENETDOWN;
goto exit;
}
if (!ar->ps_state_enable) {
ret = -EINVAL;
goto exit;
}
ar->ps_timekeeper_enable = !!ps_timekeeper_enable;
ret = count;
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static ssize_t ath11k_read_ps_timekeeper_enable(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
char buf[32];
int len;
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf), "%d\n", ar->ps_timekeeper_enable);
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_ps_timekeeper_enable = {
.read = ath11k_read_ps_timekeeper_enable,
.write = ath11k_write_ps_timekeeper_enable,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static void ath11k_reset_peer_ps_duration(void *data,
struct ieee80211_sta *sta)
{
struct ath11k *ar = data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
spin_lock_bh(&ar->data_lock);
arsta->ps_total_duration = 0;
spin_unlock_bh(&ar->data_lock);
}
static ssize_t ath11k_write_reset_ps_duration(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
int ret;
u8 reset_ps_duration;
if (kstrtou8_from_user(user_buf, count, 0, &reset_ps_duration))
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON) {
ret = -ENETDOWN;
goto exit;
}
if (!ar->ps_state_enable) {
ret = -EINVAL;
goto exit;
}
ieee80211_iterate_stations_atomic(ar->hw,
ath11k_reset_peer_ps_duration,
ar);
ret = count;
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_reset_ps_duration = {
.write = ath11k_write_reset_ps_duration,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static void ath11k_peer_ps_state_disable(void *data,
struct ieee80211_sta *sta)
{
struct ath11k *ar = data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
spin_lock_bh(&ar->data_lock);
arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
arsta->ps_start_time = 0;
arsta->ps_total_duration = 0;
spin_unlock_bh(&ar->data_lock);
}
static ssize_t ath11k_write_ps_state_enable(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
struct ath11k_pdev *pdev = ar->pdev;
int ret;
u32 param;
u8 ps_state_enable;
if (kstrtou8_from_user(user_buf, count, 0, &ps_state_enable))
return -EINVAL;
mutex_lock(&ar->conf_mutex);
ps_state_enable = !!ps_state_enable;
if (ar->ps_state_enable == ps_state_enable) {
ret = count;
goto exit;
}
param = WMI_PDEV_PEER_STA_PS_STATECHG_ENABLE;
ret = ath11k_wmi_pdev_set_param(ar, param, ps_state_enable, pdev->pdev_id);
if (ret) {
ath11k_warn(ar->ab, "failed to enable ps_state_enable: %d\n",
ret);
goto exit;
}
ar->ps_state_enable = ps_state_enable;
if (!ar->ps_state_enable) {
ar->ps_timekeeper_enable = false;
ieee80211_iterate_stations_atomic(ar->hw,
ath11k_peer_ps_state_disable,
ar);
}
ret = count;
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static ssize_t ath11k_read_ps_state_enable(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
char buf[32];
int len;
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf), "%d\n", ar->ps_state_enable);
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_ps_state_enable = {
.read = ath11k_read_ps_state_enable,
.write = ath11k_write_ps_state_enable,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
int ath11k_debugfs_register(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
char pdev_name[5];
char buf[100] = {0};
snprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx);
ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc);
if (IS_ERR(ar->debug.debugfs_pdev))
return PTR_ERR(ar->debug.debugfs_pdev);
/* Create a symlink under ieee80211/phy* */
snprintf(buf, sizeof(buf), "../../ath11k/%pd2", ar->debug.debugfs_pdev);
debugfs_create_symlink("ath11k", ar->hw->wiphy->debugfsdir, buf);
ath11k_debugfs_htt_stats_init(ar);
ath11k_debugfs_fw_stats_init(ar);
debugfs_create_file("ext_tx_stats", 0644,
ar->debug.debugfs_pdev, ar,
&fops_extd_tx_stats);
debugfs_create_file("ext_rx_stats", 0644,
ar->debug.debugfs_pdev, ar,
&fops_extd_rx_stats);
debugfs_create_file("pktlog_filter", 0644,
ar->debug.debugfs_pdev, ar,
&fops_pktlog_filter);
debugfs_create_file("fw_dbglog_config", 0600,
ar->debug.debugfs_pdev, ar,
&fops_fw_dbglog);
if (ar->hw->wiphy->bands[NL80211_BAND_5GHZ]) {
debugfs_create_file("dfs_simulate_radar", 0200,
ar->debug.debugfs_pdev, ar,
&fops_simulate_radar);
debugfs_create_bool("dfs_block_radar_events", 0200,
ar->debug.debugfs_pdev,
&ar->dfs_block_radar_events);
}
if (ab->hw_params.dbr_debug_support)
debugfs_create_file("enable_dbr_debug", 0200, ar->debug.debugfs_pdev,
ar, &fops_dbr_debug);
debugfs_create_file("ps_state_enable", 0600, ar->debug.debugfs_pdev, ar,
&fops_ps_state_enable);
if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT,
ar->ab->wmi_ab.svc_map)) {
debugfs_create_file("ps_timekeeper_enable", 0600,
ar->debug.debugfs_pdev, ar,
&fops_ps_timekeeper_enable);
debugfs_create_file("reset_ps_duration", 0200,
ar->debug.debugfs_pdev, ar,
&fops_reset_ps_duration);
}
return 0;
}
void ath11k_debugfs_unregister(struct ath11k *ar)
{
struct ath11k_debug_dbr *dbr_debug;
struct ath11k_dbg_dbr_data *dbr_dbg_data;
int i;
for (i = 0; i < WMI_DIRECT_BUF_MAX; i++) {
dbr_debug = ar->debug.dbr_debug[i];
if (!dbr_debug)
continue;
dbr_dbg_data = &dbr_debug->dbr_dbg_data;
kfree(dbr_dbg_data->entries);
debugfs_remove_recursive(dbr_debug->dbr_debugfs);
kfree(dbr_debug);
ar->debug.dbr_debug[i] = NULL;
}
}
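/* The TWT debugfs write handlers below each parse a peer MAC address
 * followed by space-separated numeric fields (see each sscanf format).
 * A hypothetical add_dialog invocation, assuming the vif's debugfs dir,
 * might look like:
 *   echo "aa:bb:cc:dd:ee:ff 1 102400 100 8192 0 4 0 1 0 0" > twt/add_dialog
 * i.e. MAC, dialog_id, wake_intvl_us, wake_intvl_mantis, wake_dura_us,
 * sp_offset_us, twt_cmd, flag_bcast, flag_trigger, flag_flow_type and
 * flag_protection. The example values are illustrative only.
 */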
static ssize_t ath11k_write_twt_add_dialog(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ath11k_vif *arvif = file->private_data;
struct wmi_twt_add_dialog_params params = { 0 };
struct wmi_twt_enable_params twt_params = {0};
struct ath11k *ar = arvif->ar;
u8 buf[128] = {0};
int ret;
if (ar->twt_enabled == 0) {
ath11k_err(ar->ab, "twt support is not enabled\n");
return -EOPNOTSUPP;
}
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
if (ret < 0)
return ret;
buf[ret] = '\0';
ret = sscanf(buf,
"%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u %u %u %u %u %hhu %hhu %hhu %hhu %hhu",
&params.peer_macaddr[0],
&params.peer_macaddr[1],
&params.peer_macaddr[2],
&params.peer_macaddr[3],
&params.peer_macaddr[4],
&params.peer_macaddr[5],
&params.dialog_id,
&params.wake_intvl_us,
&params.wake_intvl_mantis,
&params.wake_dura_us,
&params.sp_offset_us,
&params.twt_cmd,
&params.flag_bcast,
&params.flag_trigger,
&params.flag_flow_type,
&params.flag_protection);
if (ret != 16)
return -EINVAL;
/* In the case of station vif, TWT is entirely handled by
* the firmware based on the input parameters in the TWT enable
* WMI command that is sent to the target during assoc.
* For manually testing the TWT feature, we need to first disable
* TWT and send enable command again with TWT input parameter
* sta_cong_timer_ms set to 0.
*/
if (arvif->vif->type == NL80211_IFTYPE_STATION) {
ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
ath11k_wmi_fill_default_twt_params(&twt_params);
twt_params.sta_cong_timer_ms = 0;
ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params);
}
params.vdev_id = arvif->vdev_id;
ret = ath11k_wmi_send_twt_add_dialog_cmd(arvif->ar, &params);
if (ret)
goto err_twt_add_dialog;
return count;
err_twt_add_dialog:
if (arvif->vif->type == NL80211_IFTYPE_STATION) {
ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
ath11k_wmi_fill_default_twt_params(&twt_params);
ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params);
}
return ret;
}
static ssize_t ath11k_write_twt_del_dialog(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ath11k_vif *arvif = file->private_data;
struct wmi_twt_del_dialog_params params = { 0 };
struct wmi_twt_enable_params twt_params = {0};
struct ath11k *ar = arvif->ar;
u8 buf[64] = {0};
int ret;
if (ar->twt_enabled == 0) {
ath11k_err(ar->ab, "twt support is not enabled\n");
return -EOPNOTSUPP;
}
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
if (ret < 0)
return ret;
buf[ret] = '\0';
ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u",
&params.peer_macaddr[0],
&params.peer_macaddr[1],
&params.peer_macaddr[2],
&params.peer_macaddr[3],
&params.peer_macaddr[4],
&params.peer_macaddr[5],
&params.dialog_id);
if (ret != 7)
return -EINVAL;
params.vdev_id = arvif->vdev_id;
ret = ath11k_wmi_send_twt_del_dialog_cmd(arvif->ar, &params);
if (ret)
return ret;
if (arvif->vif->type == NL80211_IFTYPE_STATION) {
ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
ath11k_wmi_fill_default_twt_params(&twt_params);
ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params);
}
return count;
}
static ssize_t ath11k_write_twt_pause_dialog(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ath11k_vif *arvif = file->private_data;
struct wmi_twt_pause_dialog_params params = { 0 };
u8 buf[64] = {0};
int ret;
if (arvif->ar->twt_enabled == 0) {
ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
return -EOPNOTSUPP;
}
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
if (ret < 0)
return ret;
buf[ret] = '\0';
ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u",
&params.peer_macaddr[0],
&params.peer_macaddr[1],
&params.peer_macaddr[2],
&params.peer_macaddr[3],
&params.peer_macaddr[4],
&params.peer_macaddr[5],
&params.dialog_id);
if (ret != 7)
return -EINVAL;
params.vdev_id = arvif->vdev_id;
ret = ath11k_wmi_send_twt_pause_dialog_cmd(arvif->ar, &params);
if (ret)
return ret;
return count;
}
static ssize_t ath11k_write_twt_resume_dialog(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ath11k_vif *arvif = file->private_data;
struct wmi_twt_resume_dialog_params params = { 0 };
u8 buf[64] = {0};
int ret;
if (arvif->ar->twt_enabled == 0) {
ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
return -EOPNOTSUPP;
}
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
if (ret < 0)
return ret;
buf[ret] = '\0';
ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u %u %u",
&params.peer_macaddr[0],
&params.peer_macaddr[1],
&params.peer_macaddr[2],
&params.peer_macaddr[3],
&params.peer_macaddr[4],
&params.peer_macaddr[5],
&params.dialog_id,
&params.sp_offset_us,
&params.next_twt_size);
if (ret != 9)
return -EINVAL;
params.vdev_id = arvif->vdev_id;
ret = ath11k_wmi_send_twt_resume_dialog_cmd(arvif->ar, &params);
if (ret)
return ret;
return count;
}
static const struct file_operations ath11k_fops_twt_add_dialog = {
.write = ath11k_write_twt_add_dialog,
.open = simple_open
};
static const struct file_operations ath11k_fops_twt_del_dialog = {
.write = ath11k_write_twt_del_dialog,
.open = simple_open
};
static const struct file_operations ath11k_fops_twt_pause_dialog = {
.write = ath11k_write_twt_pause_dialog,
.open = simple_open
};
static const struct file_operations ath11k_fops_twt_resume_dialog = {
.write = ath11k_write_twt_resume_dialog,
.open = simple_open
};
void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
{
struct ath11k_base *ab = arvif->ar->ab;
if (arvif->vif->type != NL80211_IFTYPE_AP &&
!(arvif->vif->type == NL80211_IFTYPE_STATION &&
test_bit(WMI_TLV_SERVICE_STA_TWT, ab->wmi_ab.svc_map)))
return;
arvif->debugfs_twt = debugfs_create_dir("twt",
arvif->vif->debugfs_dir);
debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt,
arvif, &ath11k_fops_twt_add_dialog);
debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt,
arvif, &ath11k_fops_twt_del_dialog);
debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt,
arvif, &ath11k_fops_twt_pause_dialog);
debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt,
arvif, &ath11k_fops_twt_resume_dialog);
}
void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
{
if (!arvif->debugfs_twt)
return;
debugfs_remove_recursive(arvif->debugfs_twt);
arvif->debugfs_twt = NULL;
}
|
linux-master
|
drivers/net/wireless/ath/ath11k/debugfs.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <linux/bitfield.h>
#include <linux/inetdevice.h>
#include <net/if_inet6.h>
#include <net/ipv6.h>
#include "mac.h"
#include "core.h"
#include "debug.h"
#include "wmi.h"
#include "hw.h"
#include "dp_tx.h"
#include "dp_rx.h"
#include "testmode.h"
#include "peer.h"
#include "debugfs_sta.h"
#include "hif.h"
#include "wow.h"
#define CHAN2G(_channel, _freq, _flags) { \
.band = NL80211_BAND_2GHZ, \
.hw_value = (_channel), \
.center_freq = (_freq), \
.flags = (_flags), \
.max_antenna_gain = 0, \
.max_power = 30, \
}
#define CHAN5G(_channel, _freq, _flags) { \
.band = NL80211_BAND_5GHZ, \
.hw_value = (_channel), \
.center_freq = (_freq), \
.flags = (_flags), \
.max_antenna_gain = 0, \
.max_power = 30, \
}
#define CHAN6G(_channel, _freq, _flags) { \
.band = NL80211_BAND_6GHZ, \
.hw_value = (_channel), \
.center_freq = (_freq), \
.flags = (_flags), \
.max_antenna_gain = 0, \
.max_power = 30, \
}
static const struct ieee80211_channel ath11k_2ghz_channels[] = {
CHAN2G(1, 2412, 0),
CHAN2G(2, 2417, 0),
CHAN2G(3, 2422, 0),
CHAN2G(4, 2427, 0),
CHAN2G(5, 2432, 0),
CHAN2G(6, 2437, 0),
CHAN2G(7, 2442, 0),
CHAN2G(8, 2447, 0),
CHAN2G(9, 2452, 0),
CHAN2G(10, 2457, 0),
CHAN2G(11, 2462, 0),
CHAN2G(12, 2467, 0),
CHAN2G(13, 2472, 0),
CHAN2G(14, 2484, 0),
};
static const struct ieee80211_channel ath11k_5ghz_channels[] = {
CHAN5G(36, 5180, 0),
CHAN5G(40, 5200, 0),
CHAN5G(44, 5220, 0),
CHAN5G(48, 5240, 0),
CHAN5G(52, 5260, 0),
CHAN5G(56, 5280, 0),
CHAN5G(60, 5300, 0),
CHAN5G(64, 5320, 0),
CHAN5G(100, 5500, 0),
CHAN5G(104, 5520, 0),
CHAN5G(108, 5540, 0),
CHAN5G(112, 5560, 0),
CHAN5G(116, 5580, 0),
CHAN5G(120, 5600, 0),
CHAN5G(124, 5620, 0),
CHAN5G(128, 5640, 0),
CHAN5G(132, 5660, 0),
CHAN5G(136, 5680, 0),
CHAN5G(140, 5700, 0),
CHAN5G(144, 5720, 0),
CHAN5G(149, 5745, 0),
CHAN5G(153, 5765, 0),
CHAN5G(157, 5785, 0),
CHAN5G(161, 5805, 0),
CHAN5G(165, 5825, 0),
CHAN5G(169, 5845, 0),
CHAN5G(173, 5865, 0),
CHAN5G(177, 5885, 0),
};
static const struct ieee80211_channel ath11k_6ghz_channels[] = {
CHAN6G(1, 5955, 0),
CHAN6G(5, 5975, 0),
CHAN6G(9, 5995, 0),
CHAN6G(13, 6015, 0),
CHAN6G(17, 6035, 0),
CHAN6G(21, 6055, 0),
CHAN6G(25, 6075, 0),
CHAN6G(29, 6095, 0),
CHAN6G(33, 6115, 0),
CHAN6G(37, 6135, 0),
CHAN6G(41, 6155, 0),
CHAN6G(45, 6175, 0),
CHAN6G(49, 6195, 0),
CHAN6G(53, 6215, 0),
CHAN6G(57, 6235, 0),
CHAN6G(61, 6255, 0),
CHAN6G(65, 6275, 0),
CHAN6G(69, 6295, 0),
CHAN6G(73, 6315, 0),
CHAN6G(77, 6335, 0),
CHAN6G(81, 6355, 0),
CHAN6G(85, 6375, 0),
CHAN6G(89, 6395, 0),
CHAN6G(93, 6415, 0),
CHAN6G(97, 6435, 0),
CHAN6G(101, 6455, 0),
CHAN6G(105, 6475, 0),
CHAN6G(109, 6495, 0),
CHAN6G(113, 6515, 0),
CHAN6G(117, 6535, 0),
CHAN6G(121, 6555, 0),
CHAN6G(125, 6575, 0),
CHAN6G(129, 6595, 0),
CHAN6G(133, 6615, 0),
CHAN6G(137, 6635, 0),
CHAN6G(141, 6655, 0),
CHAN6G(145, 6675, 0),
CHAN6G(149, 6695, 0),
CHAN6G(153, 6715, 0),
CHAN6G(157, 6735, 0),
CHAN6G(161, 6755, 0),
CHAN6G(165, 6775, 0),
CHAN6G(169, 6795, 0),
CHAN6G(173, 6815, 0),
CHAN6G(177, 6835, 0),
CHAN6G(181, 6855, 0),
CHAN6G(185, 6875, 0),
CHAN6G(189, 6895, 0),
CHAN6G(193, 6915, 0),
CHAN6G(197, 6935, 0),
CHAN6G(201, 6955, 0),
CHAN6G(205, 6975, 0),
CHAN6G(209, 6995, 0),
CHAN6G(213, 7015, 0),
CHAN6G(217, 7035, 0),
CHAN6G(221, 7055, 0),
CHAN6G(225, 7075, 0),
CHAN6G(229, 7095, 0),
CHAN6G(233, 7115, 0),
/* new addition in IEEE Std 802.11ax-2021 */
CHAN6G(2, 5935, 0),
};
static struct ieee80211_rate ath11k_legacy_rates[] = {
{ .bitrate = 10,
.hw_value = ATH11K_HW_RATE_CCK_LP_1M },
{ .bitrate = 20,
.hw_value = ATH11K_HW_RATE_CCK_LP_2M,
.hw_value_short = ATH11K_HW_RATE_CCK_SP_2M,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 55,
.hw_value = ATH11K_HW_RATE_CCK_LP_5_5M,
.hw_value_short = ATH11K_HW_RATE_CCK_SP_5_5M,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 110,
.hw_value = ATH11K_HW_RATE_CCK_LP_11M,
.hw_value_short = ATH11K_HW_RATE_CCK_SP_11M,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 60, .hw_value = ATH11K_HW_RATE_OFDM_6M },
{ .bitrate = 90, .hw_value = ATH11K_HW_RATE_OFDM_9M },
{ .bitrate = 120, .hw_value = ATH11K_HW_RATE_OFDM_12M },
{ .bitrate = 180, .hw_value = ATH11K_HW_RATE_OFDM_18M },
{ .bitrate = 240, .hw_value = ATH11K_HW_RATE_OFDM_24M },
{ .bitrate = 360, .hw_value = ATH11K_HW_RATE_OFDM_36M },
{ .bitrate = 480, .hw_value = ATH11K_HW_RATE_OFDM_48M },
{ .bitrate = 540, .hw_value = ATH11K_HW_RATE_OFDM_54M },
};
static const int
ath11k_phymodes[NUM_NL80211_BANDS][ATH11K_CHAN_WIDTH_NUM] = {
[NL80211_BAND_2GHZ] = {
[NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
[NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
[NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20_2G,
[NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20_2G,
[NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40_2G,
[NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80_2G,
[NL80211_CHAN_WIDTH_80P80] = MODE_UNKNOWN,
[NL80211_CHAN_WIDTH_160] = MODE_UNKNOWN,
},
[NL80211_BAND_5GHZ] = {
[NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
[NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
[NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20,
[NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20,
[NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40,
[NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80,
[NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160,
[NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80,
},
[NL80211_BAND_6GHZ] = {
[NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
[NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
[NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20,
[NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20,
[NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40,
[NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80,
[NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160,
[NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80,
},
};
const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default = {
.rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START |
HTT_RX_FILTER_TLV_FLAGS_PPDU_END |
HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE,
.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0,
.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1,
.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2,
.pkt_filter_flags3 = HTT_RX_FP_DATA_FILTER_FLASG3 |
HTT_RX_FP_CTRL_FILTER_FLASG3
};
#define ATH11K_MAC_FIRST_OFDM_RATE_IDX 4
#define ath11k_g_rates ath11k_legacy_rates
#define ath11k_g_rates_size (ARRAY_SIZE(ath11k_legacy_rates))
#define ath11k_a_rates (ath11k_legacy_rates + 4)
#define ath11k_a_rates_size (ARRAY_SIZE(ath11k_legacy_rates) - 4)
#define ATH11K_MAC_SCAN_CMD_EVT_OVERHEAD 200 /* in msecs */
/* Overhead due to the processing of channel switch events from FW */
#define ATH11K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD 10 /* in msecs */
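/* These overheads are presumably added on top of the per-channel dwell
 * times when computing the overall scan timeout handed to the firmware.
 */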
static const u32 ath11k_smps_map[] = {
[WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
[WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
[WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
};
static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy)
{
enum nl80211_he_ru_alloc ret;
switch (ru_phy) {
case RU_26:
ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
break;
case RU_52:
ret = NL80211_RATE_INFO_HE_RU_ALLOC_52;
break;
case RU_106:
ret = NL80211_RATE_INFO_HE_RU_ALLOC_106;
break;
case RU_242:
ret = NL80211_RATE_INFO_HE_RU_ALLOC_242;
break;
case RU_484:
ret = NL80211_RATE_INFO_HE_RU_ALLOC_484;
break;
case RU_996:
ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
break;
default:
ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
break;
}
return ret;
}
enum nl80211_he_ru_alloc ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones)
{
enum nl80211_he_ru_alloc ret;
switch (ru_tones) {
case 26:
ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
break;
case 52:
ret = NL80211_RATE_INFO_HE_RU_ALLOC_52;
break;
case 106:
ret = NL80211_RATE_INFO_HE_RU_ALLOC_106;
break;
case 242:
ret = NL80211_RATE_INFO_HE_RU_ALLOC_242;
break;
case 484:
ret = NL80211_RATE_INFO_HE_RU_ALLOC_484;
break;
case 996:
ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
break;
case (996 * 2):
ret = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
break;
default:
ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
break;
}
return ret;
}
enum nl80211_he_gi ath11k_mac_he_gi_to_nl80211_he_gi(u8 sgi)
{
enum nl80211_he_gi ret;
switch (sgi) {
case RX_MSDU_START_SGI_0_8_US:
ret = NL80211_RATE_INFO_HE_GI_0_8;
break;
case RX_MSDU_START_SGI_1_6_US:
ret = NL80211_RATE_INFO_HE_GI_1_6;
break;
case RX_MSDU_START_SGI_3_2_US:
ret = NL80211_RATE_INFO_HE_GI_3_2;
break;
default:
ret = NL80211_RATE_INFO_HE_GI_0_8;
break;
}
return ret;
}
u8 ath11k_mac_bw_to_mac80211_bw(u8 bw)
{
u8 ret = 0;
switch (bw) {
case ATH11K_BW_20:
ret = RATE_INFO_BW_20;
break;
case ATH11K_BW_40:
ret = RATE_INFO_BW_40;
break;
case ATH11K_BW_80:
ret = RATE_INFO_BW_80;
break;
case ATH11K_BW_160:
ret = RATE_INFO_BW_160;
break;
}
return ret;
}
enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw bw)
{
switch (bw) {
case RATE_INFO_BW_20:
return ATH11K_BW_20;
case RATE_INFO_BW_40:
return ATH11K_BW_40;
case RATE_INFO_BW_80:
return ATH11K_BW_80;
case RATE_INFO_BW_160:
return ATH11K_BW_160;
default:
return ATH11K_BW_20;
}
}
int ath11k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
u16 *rate)
{
/* By default, assume OFDM rates */
int i = ATH11K_MAC_FIRST_OFDM_RATE_IDX;
int max_rates_idx = ath11k_g_rates_size;
if (preamble == WMI_RATE_PREAMBLE_CCK) {
hw_rc &= ~ATH11k_HW_RATECODE_CCK_SHORT_PREAM_MASK;
i = 0;
max_rates_idx = ATH11K_MAC_FIRST_OFDM_RATE_IDX;
}
while (i < max_rates_idx) {
if (hw_rc == ath11k_legacy_rates[i].hw_value) {
*rateidx = i;
*rate = ath11k_legacy_rates[i].bitrate;
return 0;
}
i++;
}
return -EINVAL;
}
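/* Population count of the chainmask, e.g. mask 0x5 (chains 0 and 2) -> 2.
 * Equivalent to the kernel's hweight32().
 */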
static int get_num_chains(u32 mask)
{
int num_chains = 0;
while (mask) {
if (mask & BIT(0))
num_chains++;
mask >>= 1;
}
return num_chains;
}
u8 ath11k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
u32 bitrate)
{
int i;
for (i = 0; i < sband->n_bitrates; i++)
if (sband->bitrates[i].bitrate == bitrate)
return i;
return 0;
}
static u32
ath11k_mac_max_ht_nss(const u8 *ht_mcs_mask)
{
int nss;
for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
if (ht_mcs_mask[nss])
return nss + 1;
return 1;
}
static u32
ath11k_mac_max_vht_nss(const u16 *vht_mcs_mask)
{
int nss;
for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
if (vht_mcs_mask[nss])
return nss + 1;
return 1;
}
static u32
ath11k_mac_max_he_nss(const u16 *he_mcs_mask)
{
int nss;
for (nss = NL80211_HE_NSS_MAX - 1; nss >= 0; nss--)
if (he_mcs_mask[nss])
return nss + 1;
return 1;
}
static u8 ath11k_parse_mpdudensity(u8 mpdudensity)
{
/* 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
* 0 for no restriction
* 1 for 1/4 us
* 2 for 1/2 us
* 3 for 1 us
* 4 for 2 us
* 5 for 4 us
* 6 for 8 us
* 7 for 16 us
*/
switch (mpdudensity) {
case 0:
return 0;
case 1:
case 2:
case 3:
/* Our lower layer calculations limit our precision to
* 1 microsecond
*/
return 1;
case 4:
return 2;
case 5:
return 4;
case 6:
return 8;
case 7:
return 16;
default:
return 0;
}
}
static int ath11k_mac_vif_chan(struct ieee80211_vif *vif,
struct cfg80211_chan_def *def)
{
struct ieee80211_chanctx_conf *conf;
rcu_read_lock();
conf = rcu_dereference(vif->bss_conf.chanctx_conf);
if (!conf) {
rcu_read_unlock();
return -ENOENT;
}
*def = conf->def;
rcu_read_unlock();
return 0;
}
static bool ath11k_mac_bitrate_is_cck(int bitrate)
{
switch (bitrate) {
case 10:
case 20:
case 55:
case 110:
return true;
}
return false;
}
u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
u8 hw_rate, bool cck)
{
const struct ieee80211_rate *rate;
int i;
for (i = 0; i < sband->n_bitrates; i++) {
rate = &sband->bitrates[i];
if (ath11k_mac_bitrate_is_cck(rate->bitrate) != cck)
continue;
if (rate->hw_value == hw_rate)
return i;
else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
rate->hw_value_short == hw_rate)
return i;
}
return 0;
}
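/* Convert a mac80211 bitrate (units of 100 kbps) to the 500 kbps units used
 * in rate sets, with BIT(7) flagging CCK rates. Worked example: 5.5 Mbps is
 * bitrate 55, DIV_ROUND_UP(55, 5) = 11, and since it is a CCK rate the
 * result is 11 | 0x80 = 0x8b.
 */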
static u8 ath11k_mac_bitrate_to_rate(int bitrate)
{
return DIV_ROUND_UP(bitrate, 5) |
(ath11k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
}
static void ath11k_get_arvif_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct ath11k_vif_iter *arvif_iter = data;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
if (arvif->vdev_id == arvif_iter->vdev_id)
arvif_iter->arvif = arvif;
}
struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id)
{
struct ath11k_vif_iter arvif_iter;
u32 flags;
memset(&arvif_iter, 0, sizeof(struct ath11k_vif_iter));
arvif_iter.vdev_id = vdev_id;
flags = IEEE80211_IFACE_ITER_RESUME_ALL;
ieee80211_iterate_active_interfaces_atomic(ar->hw,
flags,
ath11k_get_arvif_iter,
&arvif_iter);
if (!arvif_iter.arvif) {
ath11k_warn(ar->ab, "No VIF found for vdev %d\n", vdev_id);
return NULL;
}
return arvif_iter.arvif;
}
struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
u32 vdev_id)
{
int i;
struct ath11k_pdev *pdev;
struct ath11k_vif *arvif;
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
if (pdev && pdev->ar &&
(pdev->ar->allocated_vdev_map & (1LL << vdev_id))) {
arvif = ath11k_mac_get_arvif(pdev->ar, vdev_id);
if (arvif)
return arvif;
}
}
return NULL;
}
struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id)
{
int i;
struct ath11k_pdev *pdev;
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
if (pdev && pdev->ar) {
if (pdev->ar->allocated_vdev_map & (1LL << vdev_id))
return pdev->ar;
}
}
return NULL;
}
struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id)
{
int i;
struct ath11k_pdev *pdev;
if (ab->hw_params.single_pdev_only) {
pdev = rcu_dereference(ab->pdevs_active[0]);
return pdev ? pdev->ar : NULL;
}
if (WARN_ON(pdev_id > ab->num_radios))
return NULL;
for (i = 0; i < ab->num_radios; i++) {
if (ab->fw_mode == ATH11K_FIRMWARE_MODE_FTM)
pdev = &ab->pdevs[i];
else
pdev = rcu_dereference(ab->pdevs_active[i]);
if (pdev && pdev->pdev_id == pdev_id)
return (pdev->ar ? pdev->ar : NULL);
}
return NULL;
}
struct ath11k_vif *ath11k_mac_get_vif_up(struct ath11k_base *ab)
{
struct ath11k *ar;
struct ath11k_pdev *pdev;
struct ath11k_vif *arvif;
int i;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->is_up)
return arvif;
}
}
return NULL;
}
static bool ath11k_mac_band_match(enum nl80211_band band1, enum WMI_HOST_WLAN_BAND band2)
{
return (((band1 == NL80211_BAND_2GHZ) && (band2 & WMI_HOST_WLAN_2G_CAP)) ||
(((band1 == NL80211_BAND_5GHZ) || (band1 == NL80211_BAND_6GHZ)) &&
(band2 & WMI_HOST_WLAN_5G_CAP)));
}
u8 ath11k_mac_get_target_pdev_id_from_vif(struct ath11k_vif *arvif)
{
struct ath11k *ar = arvif->ar;
struct ath11k_base *ab = ar->ab;
struct ieee80211_vif *vif = arvif->vif;
struct cfg80211_chan_def def;
enum nl80211_band band;
u8 pdev_id = ab->target_pdev_ids[0].pdev_id;
int i;
if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
return pdev_id;
band = def.chan->band;
for (i = 0; i < ab->target_pdev_count; i++) {
if (ath11k_mac_band_match(band, ab->target_pdev_ids[i].supported_bands))
return ab->target_pdev_ids[i].pdev_id;
}
return pdev_id;
}
u8 ath11k_mac_get_target_pdev_id(struct ath11k *ar)
{
struct ath11k_vif *arvif;
arvif = ath11k_mac_get_vif_up(ar->ab);
if (arvif)
return ath11k_mac_get_target_pdev_id_from_vif(arvif);
else
return ar->ab->target_pdev_ids[0].pdev_id;
}
static void ath11k_pdev_caps_update(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
ar->max_tx_power = ab->target_caps.hw_max_tx_power;
/* FIXME: Set min_tx_power to ab->target_caps.hw_min_tx_power.
 * But since the value received in svcrdy is the same as hw_max_tx_power,
 * set ar->min_tx_power to 0 for now, until this is fixed in firmware.
 */
ar->min_tx_power = 0;
ar->txpower_limit_2g = ar->max_tx_power;
ar->txpower_limit_5g = ar->max_tx_power;
ar->txpower_scale = WMI_HOST_TP_SCALE_MAX;
}
static int ath11k_mac_txpower_recalc(struct ath11k *ar)
{
struct ath11k_pdev *pdev = ar->pdev;
struct ath11k_vif *arvif;
int ret, txpower = -1;
u32 param;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->txpower <= 0)
continue;
if (txpower == -1)
txpower = arvif->txpower;
else
txpower = min(txpower, arvif->txpower);
}
if (txpower == -1)
return 0;
/* txpower is set in firmware as 2 units per dBm, e.g. 20 dBm -> 40 units */
txpower = min_t(u32, max_t(u32, ar->min_tx_power, txpower),
ar->max_tx_power) * 2;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower to set in hw %d\n",
txpower / 2);
if ((pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) &&
ar->txpower_limit_2g != txpower) {
param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G;
ret = ath11k_wmi_pdev_set_param(ar, param,
txpower, ar->pdev->pdev_id);
if (ret)
goto fail;
ar->txpower_limit_2g = txpower;
}
if ((pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) &&
ar->txpower_limit_5g != txpower) {
param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
ret = ath11k_wmi_pdev_set_param(ar, param,
txpower, ar->pdev->pdev_id);
if (ret)
goto fail;
ar->txpower_limit_5g = txpower;
}
return 0;
fail:
ath11k_warn(ar->ab, "failed to recalc txpower limit %d using pdev param %d: %d\n",
txpower / 2, param, ret);
return ret;
}
static int ath11k_recalc_rtscts_prot(struct ath11k_vif *arvif)
{
struct ath11k *ar = arvif->ar;
u32 vdev_param, rts_cts = 0;
int ret;
lockdep_assert_held(&ar->conf_mutex);
vdev_param = WMI_VDEV_PARAM_ENABLE_RTSCTS;
/* Enable RTS/CTS protection for sw retries (when legacy stations
* are in BSS) or by default only for second rate series.
* TODO: Check if we need to enable CTS 2 Self in any case
*/
rts_cts = WMI_USE_RTS_CTS;
if (arvif->num_legacy_stations > 0)
rts_cts |= WMI_RTSCTS_ACROSS_SW_RETRIES << 4;
else
rts_cts |= WMI_RTSCTS_FOR_SECOND_RATESERIES << 4;
/* Need not send duplicate param value to firmware */
if (arvif->rtscts_prot_mode == rts_cts)
return 0;
arvif->rtscts_prot_mode = rts_cts;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %d recalc rts/cts prot %d\n",
arvif->vdev_id, rts_cts);
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
vdev_param, rts_cts);
if (ret)
ath11k_warn(ar->ab, "failed to recalculate rts/cts prot for vdev %d: %d\n",
arvif->vdev_id, ret);
return ret;
}
static int ath11k_mac_set_kickout(struct ath11k_vif *arvif)
{
struct ath11k *ar = arvif->ar;
u32 param;
int ret;
ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_STA_KICKOUT_TH,
ATH11K_KICKOUT_THRESHOLD,
ar->pdev->pdev_id);
if (ret) {
ath11k_warn(ar->ab, "failed to set kickout threshold on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
param = WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
ATH11K_KEEPALIVE_MIN_IDLE);
if (ret) {
ath11k_warn(ar->ab, "failed to set keepalive minimum idle time on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
ATH11K_KEEPALIVE_MAX_IDLE);
if (ret) {
ath11k_warn(ar->ab, "failed to set keepalive maximum idle time on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
ATH11K_KEEPALIVE_MAX_UNRESPONSIVE);
if (ret) {
ath11k_warn(ar->ab, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
return 0;
}
void ath11k_mac_peer_cleanup_all(struct ath11k *ar)
{
struct ath11k_peer *peer, *tmp;
struct ath11k_base *ab = ar->ab;
lockdep_assert_held(&ar->conf_mutex);
mutex_lock(&ab->tbl_mtx_lock);
spin_lock_bh(&ab->base_lock);
list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
ath11k_peer_rx_tid_cleanup(ar, peer);
ath11k_peer_rhash_delete(ab, peer);
list_del(&peer->list);
kfree(peer);
}
spin_unlock_bh(&ab->base_lock);
mutex_unlock(&ab->tbl_mtx_lock);
ar->num_peers = 0;
ar->num_stations = 0;
}
static inline int ath11k_mac_vdev_setup_sync(struct ath11k *ar)
{
lockdep_assert_held(&ar->conf_mutex);
if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
if (!wait_for_completion_timeout(&ar->vdev_setup_done,
ATH11K_VDEV_SETUP_TIMEOUT_HZ))
return -ETIMEDOUT;
return ar->last_wmi_vdev_start_status ? -EINVAL : 0;
}
static void
ath11k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *conf,
void *data)
{
struct cfg80211_chan_def **def = data;
*def = &conf->def;
}
static int ath11k_mac_monitor_vdev_start(struct ath11k *ar, int vdev_id,
struct cfg80211_chan_def *chandef)
{
struct ieee80211_channel *channel;
struct wmi_vdev_start_req_arg arg = {};
int ret;
lockdep_assert_held(&ar->conf_mutex);
channel = chandef->chan;
arg.vdev_id = vdev_id;
arg.channel.freq = channel->center_freq;
arg.channel.band_center_freq1 = chandef->center_freq1;
arg.channel.band_center_freq2 = chandef->center_freq2;
arg.channel.mode = ath11k_phymodes[chandef->chan->band][chandef->width];
arg.channel.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR);
arg.channel.min_power = 0;
arg.channel.max_power = channel->max_power;
arg.channel.max_reg_power = channel->max_reg_power;
arg.channel.max_antenna_gain = channel->max_antenna_gain;
arg.pref_tx_streams = ar->num_tx_chains;
arg.pref_rx_streams = ar->num_rx_chains;
arg.channel.passive = !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
reinit_completion(&ar->vdev_setup_done);
reinit_completion(&ar->vdev_delete_done);
ret = ath11k_wmi_vdev_start(ar, &arg, false);
if (ret) {
ath11k_warn(ar->ab, "failed to request monitor vdev %i start: %d\n",
vdev_id, ret);
return ret;
}
ret = ath11k_mac_vdev_setup_sync(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to synchronize setup for monitor vdev %i start: %d\n",
vdev_id, ret);
return ret;
}
ret = ath11k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr, NULL, 0, 0);
if (ret) {
ath11k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
vdev_id, ret);
goto vdev_stop;
}
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor vdev %i started\n",
vdev_id);
return 0;
vdev_stop:
reinit_completion(&ar->vdev_setup_done);
ret = ath11k_wmi_vdev_stop(ar, vdev_id);
if (ret) {
ath11k_warn(ar->ab, "failed to stop monitor vdev %i after start failure: %d\n",
vdev_id, ret);
return ret;
}
ret = ath11k_mac_vdev_setup_sync(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to synchronize setup for vdev %i stop: %d\n",
vdev_id, ret);
return ret;
}
return -EIO;
}
static int ath11k_mac_monitor_vdev_stop(struct ath11k *ar)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->vdev_setup_done);
ret = ath11k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret) {
ath11k_warn(ar->ab, "failed to request monitor vdev %i stop: %d\n",
ar->monitor_vdev_id, ret);
return ret;
}
ret = ath11k_mac_vdev_setup_sync(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to synchronize monitor vdev %i stop: %d\n",
ar->monitor_vdev_id, ret);
return ret;
}
ret = ath11k_wmi_vdev_down(ar, ar->monitor_vdev_id);
if (ret) {
ath11k_warn(ar->ab, "failed to put down monitor vdev %i: %d\n",
ar->monitor_vdev_id, ret);
return ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor vdev %i stopped\n",
ar->monitor_vdev_id);
return 0;
}
static int ath11k_mac_monitor_vdev_create(struct ath11k *ar)
{
struct ath11k_pdev *pdev = ar->pdev;
struct vdev_create_params param = {};
int bit, ret;
u8 tmp_addr[6] = {0};
u16 nss;
lockdep_assert_held(&ar->conf_mutex);
if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags))
return 0;
if (ar->ab->free_vdev_map == 0) {
ath11k_warn(ar->ab, "failed to find free vdev id for monitor vdev\n");
return -ENOMEM;
}
bit = __ffs64(ar->ab->free_vdev_map);
ar->monitor_vdev_id = bit;
param.if_id = ar->monitor_vdev_id;
param.type = WMI_VDEV_TYPE_MONITOR;
param.subtype = WMI_VDEV_SUBTYPE_NONE;
param.pdev_id = pdev->pdev_id;
if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
param.chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
param.chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
}
if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
param.chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
param.chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
}
ret = ath11k_wmi_vdev_create(ar, tmp_addr, &param);
if (ret) {
ath11k_warn(ar->ab, "failed to request monitor vdev %i creation: %d\n",
ar->monitor_vdev_id, ret);
ar->monitor_vdev_id = -1;
return ret;
}
nss = get_num_chains(ar->cfg_tx_chainmask) ? : 1;
ret = ath11k_wmi_vdev_set_param_cmd(ar, ar->monitor_vdev_id,
WMI_VDEV_PARAM_NSS, nss);
if (ret) {
ath11k_warn(ar->ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n",
ar->monitor_vdev_id, ar->cfg_tx_chainmask, nss, ret);
goto err_vdev_del;
}
ret = ath11k_mac_txpower_recalc(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to recalc txpower for monitor vdev %d: %d\n",
ar->monitor_vdev_id, ret);
goto err_vdev_del;
}
ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id;
ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
ar->num_created_vdevs++;
set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor vdev %d created\n",
ar->monitor_vdev_id);
return 0;
err_vdev_del:
ath11k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
ar->monitor_vdev_id = -1;
return ret;
}
static int ath11k_mac_monitor_vdev_delete(struct ath11k *ar)
{
int ret;
unsigned long time_left;
lockdep_assert_held(&ar->conf_mutex);
if (!test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags))
return 0;
reinit_completion(&ar->vdev_delete_done);
ret = ath11k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
if (ret) {
ath11k_warn(ar->ab, "failed to request wmi monitor vdev %i removal: %d\n",
ar->monitor_vdev_id, ret);
return ret;
}
time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
ATH11K_VDEV_DELETE_TIMEOUT_HZ);
if (time_left == 0) {
ath11k_warn(ar->ab, "Timeout in receiving vdev delete response\n");
} else {
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor vdev %d deleted\n",
ar->monitor_vdev_id);
ar->allocated_vdev_map &= ~(1LL << ar->monitor_vdev_id);
ar->ab->free_vdev_map |= 1LL << (ar->monitor_vdev_id);
ar->num_created_vdevs--;
ar->monitor_vdev_id = -1;
clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
}
return ret;
}
static int ath11k_mac_monitor_start(struct ath11k *ar)
{
struct cfg80211_chan_def *chandef = NULL;
int ret;
lockdep_assert_held(&ar->conf_mutex);
if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
return 0;
ieee80211_iter_chan_contexts_atomic(ar->hw,
ath11k_mac_get_any_chandef_iter,
&chandef);
if (!chandef)
return 0;
ret = ath11k_mac_monitor_vdev_start(ar, ar->monitor_vdev_id, chandef);
if (ret) {
ath11k_warn(ar->ab, "failed to start monitor vdev: %d\n", ret);
ath11k_mac_monitor_vdev_delete(ar);
return ret;
}
set_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
ar->num_started_vdevs++;
ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, false);
if (ret) {
ath11k_warn(ar->ab, "failed to configure htt monitor mode ring during start: %d",
ret);
return ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor started\n");
return 0;
}
static int ath11k_mac_monitor_stop(struct ath11k *ar)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
if (!test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
return 0;
ret = ath11k_mac_monitor_vdev_stop(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to stop monitor vdev: %d\n", ret);
return ret;
}
clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
ar->num_started_vdevs--;
ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, true);
if (ret) {
ath11k_warn(ar->ab, "failed to configure htt monitor mode ring during stop: %d",
ret);
return ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor stopped ret %d\n", ret);
return 0;
}
static int ath11k_mac_vif_setup_ps(struct ath11k_vif *arvif)
{
struct ath11k *ar = arvif->ar;
struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_conf *conf = &ar->hw->conf;
enum wmi_sta_powersave_param param;
enum wmi_sta_ps_mode psmode;
int ret;
int timeout;
bool enable_ps;
lockdep_assert_held(&arvif->ar->conf_mutex);
if (arvif->vif->type != NL80211_IFTYPE_STATION)
return 0;
enable_ps = arvif->ps;
if (!arvif->is_started) {
/* mac80211 can update vif powersave state while disconnected.
* Firmware doesn't behave nicely and consumes more power than
* necessary if PS is disabled on a non-started vdev. Hence
* force-enable PS for non-running vdevs.
*/
psmode = WMI_STA_PS_MODE_ENABLED;
} else if (enable_ps) {
psmode = WMI_STA_PS_MODE_ENABLED;
param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
timeout = conf->dynamic_ps_timeout;
if (timeout == 0) {
/* firmware doesn't like 0 */
timeout = ieee80211_tu_to_usec(vif->bss_conf.beacon_int) / 1000;
}
ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
timeout);
if (ret) {
ath11k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
arvif->vdev_id, ret);
return ret;
}
} else {
psmode = WMI_STA_PS_MODE_DISABLED;
}
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %d psmode %s\n",
arvif->vdev_id, psmode ? "enable" : "disable");
ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
if (ret) {
ath11k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
psmode, arvif->vdev_id, ret);
return ret;
}
return 0;
}
static int ath11k_mac_config_ps(struct ath11k *ar)
{
struct ath11k_vif *arvif;
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath11k_mac_vif_setup_ps(arvif);
if (ret) {
ath11k_warn(ar->ab, "failed to setup powersave: %d\n", ret);
break;
}
}
return ret;
}
static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath11k *ar = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
int ret = 0;
mutex_lock(&ar->conf_mutex);
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
if (conf->flags & IEEE80211_CONF_MONITOR) {
set_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags);
if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED,
&ar->monitor_flags))
goto out;
ret = ath11k_mac_monitor_vdev_create(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to create monitor vdev: %d",
ret);
goto out;
}
ret = ath11k_mac_monitor_start(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to start monitor: %d",
ret);
goto err_mon_del;
}
} else {
clear_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags);
if (!test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED,
&ar->monitor_flags))
goto out;
ret = ath11k_mac_monitor_stop(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to stop monitor: %d",
ret);
goto out;
}
ret = ath11k_mac_monitor_vdev_delete(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to delete monitor vdev: %d",
ret);
goto out;
}
}
}
out:
mutex_unlock(&ar->conf_mutex);
return ret;
err_mon_del:
ath11k_mac_monitor_vdev_delete(ar);
mutex_unlock(&ar->conf_mutex);
return ret;
}
static void ath11k_mac_setup_nontx_vif_rsnie(struct ath11k_vif *arvif,
bool tx_arvif_rsnie_present,
const u8 *profile, u8 profile_len)
{
if (cfg80211_find_ie(WLAN_EID_RSN, profile, profile_len)) {
arvif->rsnie_present = true;
} else if (tx_arvif_rsnie_present) {
int i;
u8 nie_len;
const u8 *nie = cfg80211_find_ext_ie(WLAN_EID_EXT_NON_INHERITANCE,
profile, profile_len);
if (!nie)
return;
nie_len = nie[1];
nie += 2;
for (i = 0; i < nie_len; i++) {
if (nie[i] == WLAN_EID_RSN) {
arvif->rsnie_present = false;
break;
}
}
}
}
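/* Walk the Multiple BSSID element(s) in the beacon to locate the
 * nontransmitted profile whose BSSID index matches this vif. Within each
 * subelement the parser skips the nontransmitted capabilities and SSID
 * elements, matches on the Multiple-BSSID-Index element, and then checks
 * RSN inheritance for that profile.
 */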
static bool ath11k_mac_set_nontx_vif_params(struct ath11k_vif *tx_arvif,
struct ath11k_vif *arvif,
struct sk_buff *bcn)
{
struct ieee80211_mgmt *mgmt;
const u8 *ies, *profile, *next_profile;
int ies_len;
ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn);
mgmt = (struct ieee80211_mgmt *)bcn->data;
ies += sizeof(mgmt->u.beacon);
ies_len = skb_tail_pointer(bcn) - ies;
ies = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ies, ies_len);
arvif->rsnie_present = tx_arvif->rsnie_present;
while (ies) {
u8 mbssid_len;
ies_len -= (2 + ies[1]);
mbssid_len = ies[1] - 1;
profile = &ies[3];
while (mbssid_len) {
u8 profile_len;
profile_len = profile[1];
next_profile = profile + (2 + profile_len);
mbssid_len -= (2 + profile_len);
profile += 2;
profile_len -= (2 + profile[1]);
profile += (2 + profile[1]); /* nontx capabilities */
profile_len -= (2 + profile[1]);
profile += (2 + profile[1]); /* SSID */
if (profile[2] == arvif->vif->bss_conf.bssid_index) {
profile_len -= 5;
profile = profile + 5;
ath11k_mac_setup_nontx_vif_rsnie(arvif,
tx_arvif->rsnie_present,
profile,
profile_len);
return true;
}
profile = next_profile;
}
ies = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, profile,
ies_len);
}
return false;
}
static void ath11k_mac_set_vif_params(struct ath11k_vif *arvif,
struct sk_buff *bcn)
{
struct ieee80211_mgmt *mgmt;
u8 *ies;
ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn);
mgmt = (struct ieee80211_mgmt *)bcn->data;
ies += sizeof(mgmt->u.beacon);
if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies)))
arvif->rsnie_present = true;
else
arvif->rsnie_present = false;
if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPA,
ies, (skb_tail_pointer(bcn) - ies)))
arvif->wpaie_present = true;
else
arvif->wpaie_present = false;
}
static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif)
{
struct ath11k_vif *tx_arvif;
struct ieee80211_ema_beacons *beacons;
int ret = 0;
bool nontx_vif_params_set = false;
u32 params = 0;
u8 i = 0;
tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
beacons = ieee80211_beacon_get_template_ema_list(tx_arvif->ar->hw,
tx_arvif->vif, 0);
if (!beacons || !beacons->cnt) {
ath11k_warn(arvif->ar->ab,
"failed to get ema beacon templates from mac80211\n");
return -EPERM;
}
if (tx_arvif == arvif)
ath11k_mac_set_vif_params(tx_arvif, beacons->bcn[0].skb);
else
arvif->wpaie_present = tx_arvif->wpaie_present;
for (i = 0; i < beacons->cnt; i++) {
if (tx_arvif != arvif && !nontx_vif_params_set)
nontx_vif_params_set =
ath11k_mac_set_nontx_vif_params(tx_arvif, arvif,
beacons->bcn[i].skb);
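/* Pack the WMI EMA template descriptor: total template count in the
 * low bits, this template's index, and first/last flags so firmware
 * knows when the template set is complete.
 */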
params = beacons->cnt;
params |= (i << WMI_EMA_TMPL_IDX_SHIFT);
params |= ((!i ? 1 : 0) << WMI_EMA_FIRST_TMPL_SHIFT);
params |= ((i + 1 == beacons->cnt ? 1 : 0) << WMI_EMA_LAST_TMPL_SHIFT);
ret = ath11k_wmi_bcn_tmpl(tx_arvif->ar, tx_arvif->vdev_id,
&beacons->bcn[i].offs,
beacons->bcn[i].skb, params);
if (ret) {
ath11k_warn(tx_arvif->ar->ab,
"failed to set ema beacon template id %i error %d\n",
i, ret);
break;
}
}
ieee80211_beacon_free_ema_list(beacons);
if (tx_arvif != arvif && !nontx_vif_params_set)
return -EINVAL; /* Profile not found in the beacons */
return ret;
}
static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif)
{
struct ath11k *ar = arvif->ar;
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *tx_arvif = arvif;
struct ieee80211_hw *hw = ar->hw;
struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_mutable_offsets offs = {};
struct sk_buff *bcn;
int ret;
if (vif->mbssid_tx_vif) {
tx_arvif = ath11k_vif_to_arvif(vif->mbssid_tx_vif);
if (tx_arvif != arvif) {
ar = tx_arvif->ar;
ab = ar->ab;
hw = ar->hw;
vif = tx_arvif->vif;
}
}
bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
if (!bcn) {
ath11k_warn(ab, "failed to get beacon template from mac80211\n");
return -EPERM;
}
if (tx_arvif == arvif)
ath11k_mac_set_vif_params(tx_arvif, bcn);
else if (!ath11k_mac_set_nontx_vif_params(tx_arvif, arvif, bcn))
return -EINVAL;
ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn, 0);
kfree_skb(bcn);
if (ret)
ath11k_warn(ab, "failed to submit beacon template command: %d\n",
ret);
return ret;
}
static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
{
struct ieee80211_vif *vif = arvif->vif;
if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
return 0;
/* Target does not expect beacon templates for already-up non-transmitting
 * interfaces; sending one results in a crash.
 */
if (vif->mbssid_tx_vif &&
arvif != ath11k_vif_to_arvif(vif->mbssid_tx_vif) && arvif->is_up)
return 0;
if (vif->bss_conf.ema_ap && vif->mbssid_tx_vif)
return ath11k_mac_setup_bcn_tmpl_ema(arvif);
return ath11k_mac_setup_bcn_tmpl_mbssid(arvif);
}
void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
{
struct ieee80211_vif *vif = arvif->vif;
if (!vif->bss_conf.color_change_active && !arvif->bcca_zero_sent)
return;
if (vif->bss_conf.color_change_active &&
ieee80211_beacon_cntdwn_is_complete(vif)) {
arvif->bcca_zero_sent = true;
ieee80211_color_change_finish(vif);
return;
}
arvif->bcca_zero_sent = false;
if (vif->bss_conf.color_change_active)
ieee80211_beacon_update_cntdwn(vif);
ath11k_mac_setup_bcn_tmpl(arvif);
}
static void ath11k_control_beaconing(struct ath11k_vif *arvif,
struct ieee80211_bss_conf *info)
{
struct ath11k *ar = arvif->ar;
struct ath11k_vif *tx_arvif = NULL;
int ret = 0;
lockdep_assert_held(&arvif->ar->conf_mutex);
if (!info->enable_beacon) {
ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id);
if (ret)
ath11k_warn(ar->ab, "failed to down vdev_id %i: %d\n",
arvif->vdev_id, ret);
arvif->is_up = false;
return;
}
/* Install the beacon template to the FW */
ret = ath11k_mac_setup_bcn_tmpl(arvif);
if (ret) {
ath11k_warn(ar->ab, "failed to update bcn tmpl during vdev up: %d\n",
ret);
return;
}
arvif->tx_seq_no = 0x1000;
arvif->aid = 0;
ether_addr_copy(arvif->bssid, info->bssid);
if (arvif->vif->mbssid_tx_vif)
tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid,
tx_arvif ? tx_arvif->bssid : NULL,
info->bssid_index,
1 << info->bssid_indicator);
if (ret) {
ath11k_warn(ar->ab, "failed to bring up vdev %d: %i\n",
arvif->vdev_id, ret);
return;
}
arvif->is_up = true;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %d up\n", arvif->vdev_id);
}
static void ath11k_mac_handle_beacon_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct sk_buff *skb = data;
struct ieee80211_mgmt *mgmt = (void *)skb->data;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
if (vif->type != NL80211_IFTYPE_STATION)
return;
if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
return;
cancel_delayed_work(&arvif->connection_loss_work);
}
void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb)
{
ieee80211_iterate_active_interfaces_atomic(ar->hw,
IEEE80211_IFACE_ITER_NORMAL,
ath11k_mac_handle_beacon_iter,
skb);
}
static void ath11k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
u32 *vdev_id = data;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k *ar = arvif->ar;
struct ieee80211_hw *hw = ar->hw;
if (arvif->vdev_id != *vdev_id)
return;
if (!arvif->is_up)
return;
ieee80211_beacon_loss(vif);
/* Firmware doesn't report beacon loss events repeatedly. If AP probe
* (done by mac80211) succeeds but beacons do not resume then it
* doesn't make sense to continue operation. Queue connection loss work
* which can be cancelled when beacon is received.
*/
ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
ATH11K_CONNECTION_LOSS_HZ);
}
void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id)
{
ieee80211_iterate_active_interfaces_atomic(ar->hw,
IEEE80211_IFACE_ITER_NORMAL,
ath11k_mac_handle_beacon_miss_iter,
&vdev_id);
}
static void ath11k_mac_vif_sta_connection_loss_work(struct work_struct *work)
{
struct ath11k_vif *arvif = container_of(work, struct ath11k_vif,
connection_loss_work.work);
struct ieee80211_vif *vif = arvif->vif;
if (!arvif->is_up)
return;
ieee80211_connection_loss(vif);
}
static void ath11k_peer_assoc_h_basic(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct peer_assoc_params *arg)
{
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
u32 aid;
lockdep_assert_held(&ar->conf_mutex);
if (vif->type == NL80211_IFTYPE_STATION)
aid = vif->cfg.aid;
else
aid = sta->aid;
ether_addr_copy(arg->peer_mac, sta->addr);
arg->vdev_id = arvif->vdev_id;
arg->peer_associd = aid;
arg->auth_flag = true;
/* TODO: STA WAR in ath10k for listen interval required? */
arg->peer_listen_intval = ar->hw->conf.listen_interval;
arg->peer_nss = 1;
arg->peer_caps = vif->bss_conf.assoc_capability;
}
static void ath11k_peer_assoc_h_crypto(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct peer_assoc_params *arg)
{
struct ieee80211_bss_conf *info = &vif->bss_conf;
struct cfg80211_chan_def def;
struct cfg80211_bss *bss;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
const u8 *rsnie = NULL;
const u8 *wpaie = NULL;
lockdep_assert_held(&ar->conf_mutex);
if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
return;
bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
if (arvif->rsnie_present || arvif->wpaie_present) {
arg->need_ptk_4_way = true;
if (arvif->wpaie_present)
arg->need_gtk_2_way = true;
} else if (bss) {
const struct cfg80211_bss_ies *ies;
rcu_read_lock();
rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
ies = rcu_dereference(bss->ies);
wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPA,
ies->data,
ies->len);
rcu_read_unlock();
cfg80211_put_bss(ar->hw->wiphy, bss);
}
/* FIXME: is basing this on RSN IE/WPA IE presence a correct idea? */
if (rsnie || wpaie) {
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"%s: rsn ie found\n", __func__);
arg->need_ptk_4_way = true;
}
if (wpaie) {
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"%s: wpa ie found\n", __func__);
arg->need_gtk_2_way = true;
}
if (sta->mfp) {
/* TODO: Need to check if FW supports PMF? */
arg->is_pmf_enabled = true;
}
/* TODO: safe_mode_enabled (bypass 4-way handshake) flag req? */
}
static void ath11k_peer_assoc_h_rates(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct peer_assoc_params *arg)
{
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
struct cfg80211_chan_def def;
const struct ieee80211_supported_band *sband;
const struct ieee80211_rate *rates;
enum nl80211_band band;
u32 ratemask;
u8 rate;
int i;
lockdep_assert_held(&ar->conf_mutex);
if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
return;
band = def.chan->band;
sband = ar->hw->wiphy->bands[band];
ratemask = sta->deflink.supp_rates[band];
ratemask &= arvif->bitrate_mask.control[band].legacy;
rates = sband->bitrates;
rateset->num_rates = 0;
for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
if (!(ratemask & 1))
continue;
rate = ath11k_mac_bitrate_to_rate(rates->bitrate);
rateset->rates[rateset->num_rates] = rate;
rateset->num_rates++;
}
}
static bool
ath11k_peer_assoc_h_ht_masked(const u8 *ht_mcs_mask)
{
int nss;
for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
if (ht_mcs_mask[nss])
return false;
return true;
}
static bool
ath11k_peer_assoc_h_vht_masked(const u16 *vht_mcs_mask)
{
int nss;
for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
if (vht_mcs_mask[nss])
return false;
return true;
}
static void ath11k_peer_assoc_h_ht(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct peer_assoc_params *arg)
{
const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct cfg80211_chan_def def;
enum nl80211_band band;
const u8 *ht_mcs_mask;
int i, n;
u8 max_nss;
u32 stbc;
lockdep_assert_held(&ar->conf_mutex);
if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
return;
if (!ht_cap->ht_supported)
return;
band = def.chan->band;
ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
if (ath11k_peer_assoc_h_ht_masked(ht_mcs_mask))
return;
arg->ht_flag = true;
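/* Max A-MPDU length in bytes: (1 << (13 + ampdu_factor)) - 1, assuming
 * IEEE80211_HT_MAX_AMPDU_FACTOR == 13; e.g. ampdu_factor 3 gives 65535.
 */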
arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
ht_cap->ampdu_factor)) - 1;
arg->peer_mpdu_density =
ath11k_parse_mpdudensity(ht_cap->ampdu_density);
arg->peer_ht_caps = ht_cap->cap;
arg->peer_rate_caps |= WMI_HOST_RC_HT_FLAG;
if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
arg->ldpc_flag = true;
if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) {
arg->bw_40 = true;
arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
}
/* As firmware handles these two flags (IEEE80211_HT_CAP_SGI_20 and
 * IEEE80211_HT_CAP_SGI_40) for enabling SGI, reset both flags if the
 * guard interval is the default GI.
 */
if (arvif->bitrate_mask.control[band].gi == NL80211_TXRATE_DEFAULT_GI)
arg->peer_ht_caps &= ~(IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40);
if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40))
arg->peer_rate_caps |= WMI_HOST_RC_SGI_FLAG;
}
if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
arg->peer_rate_caps |= WMI_HOST_RC_TX_STBC_FLAG;
arg->stbc_flag = true;
}
if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
stbc = stbc << WMI_HOST_RC_RX_STBC_FLAG_S;
arg->peer_rate_caps |= stbc;
arg->stbc_flag = true;
}
if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
arg->peer_rate_caps |= WMI_HOST_RC_TS_FLAG;
else if (ht_cap->mcs.rx_mask[1])
arg->peer_rate_caps |= WMI_HOST_RC_DS_FLAG;
for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
(ht_mcs_mask[i / 8] & BIT(i % 8))) {
max_nss = (i / 8) + 1;
arg->peer_ht_rates.rates[n++] = i;
}
/* This is a workaround for HT-enabled STAs which break the spec
* and have no HT capabilities RX mask (no HT RX MCS map).
*
* As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
* MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
*
* Firmware asserts if such a situation occurs.
*/
if (n == 0) {
arg->peer_ht_rates.num_rates = 8;
for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
arg->peer_ht_rates.rates[i] = i;
} else {
arg->peer_ht_rates.num_rates = n;
arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
}
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "ht peer %pM mcs cnt %d nss %d\n",
arg->peer_mac,
arg->peer_ht_rates.num_rates,
arg->peer_nss);
}
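/* The VHT MCS map packs 2 bits per NSS (NSS 1..8). Return the supported MCS
 * indices as a bitmask, e.g. a field value of 2 (MCS 0-9 supported) yields
 * BIT(10) - 1 = 0x3ff.
 */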
static int ath11k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
{
switch ((mcs_map >> (2 * nss)) & 0x3) {
case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
}
return 0;
}
static u16
ath11k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
{
int idx_limit;
int nss;
u16 mcs_map;
u16 mcs;
for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
mcs_map = ath11k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
vht_mcs_limit[nss];
if (mcs_map)
idx_limit = fls(mcs_map) - 1;
else
idx_limit = -1;
switch (idx_limit) {
case 0:
case 1:
case 2:
case 3:
case 4:
case 5:
case 6:
case 7:
mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
break;
case 8:
mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
break;
case 9:
mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
break;
default:
WARN_ON(1);
fallthrough;
case -1:
mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
break;
}
tx_mcs_set &= ~(0x3 << (nss * 2));
tx_mcs_set |= mcs << (nss * 2);
}
return tx_mcs_set;
}
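/* Firmware reports the ratio of NSS usable at 160 MHz relative to 80 MHz;
 * e.g. a 1:2 ratio halves max_nss. The 3:4 and 2:1 ratios are reported as
 * unsupported by this driver.
 */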
static u8 ath11k_get_nss_160mhz(struct ath11k *ar,
u8 max_nss)
{
u8 nss_ratio_info = ar->pdev->cap.nss_ratio_info;
u8 max_sup_nss = 0;
switch (nss_ratio_info) {
case WMI_NSS_RATIO_1BY2_NSS:
max_sup_nss = max_nss >> 1;
break;
case WMI_NSS_RATIO_3BY4_NSS:
ath11k_warn(ar->ab, "WMI_NSS_RATIO_3BY4_NSS not supported\n");
break;
case WMI_NSS_RATIO_1_NSS:
max_sup_nss = max_nss;
break;
case WMI_NSS_RATIO_2_NSS:
ath11k_warn(ar->ab, "WMI_NSS_RATIO_2_NSS not supported\n");
break;
default:
ath11k_warn(ar->ab, "invalid nss ratio received from firmware: %d\n",
nss_ratio_info);
break;
}
return max_sup_nss;
}
static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct peer_assoc_params *arg)
{
const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct cfg80211_chan_def def;
enum nl80211_band band;
u16 *vht_mcs_mask;
u8 ampdu_factor;
u8 max_nss, vht_mcs;
int i, vht_nss, nss_idx;
bool user_rate_valid = true;
u32 rx_nss, tx_nss, nss_160;
if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
return;
if (!vht_cap->vht_supported)
return;
band = def.chan->band;
vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
if (ath11k_peer_assoc_h_vht_masked(vht_mcs_mask))
return;
arg->vht_flag = true;
/* TODO: similar flags required? */
arg->vht_capable = true;
if (def.chan->band == NL80211_BAND_2GHZ)
arg->vht_ng_flag = true;
arg->peer_vht_caps = vht_cap->cap;
ampdu_factor = (vht_cap->cap &
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
/* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
* zero in VHT IE. Using it would result in degraded throughput.
* arg->peer_max_mpdu at this point contains HT max_mpdu so keep
* it if VHT max_mpdu is smaller.
*/
arg->peer_max_mpdu = max(arg->peer_max_mpdu,
(1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1);
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
arg->bw_80 = true;
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
arg->bw_160 = true;
vht_nss = ath11k_mac_max_vht_nss(vht_mcs_mask);
if (vht_nss > sta->deflink.rx_nss) {
user_rate_valid = false;
for (nss_idx = sta->deflink.rx_nss - 1; nss_idx >= 0; nss_idx--) {
if (vht_mcs_mask[nss_idx]) {
user_rate_valid = true;
break;
}
}
}
if (!user_rate_valid) {
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting vht range mcs value to peer supported nss %d for peer %pM\n",
sta->deflink.rx_nss, sta->addr);
vht_mcs_mask[sta->deflink.rx_nss - 1] = vht_mcs_mask[vht_nss - 1];
}
/* Calculate peer NSS capability from VHT capabilities if STA
* supports VHT.
*/
for (i = 0, max_nss = 0; i < NL80211_VHT_NSS_MAX; i++) {
vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
(2 * i) & 3;
if (vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED &&
vht_mcs_mask[i])
max_nss = i + 1;
}
arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
arg->tx_mcs_set = ath11k_peer_assoc_h_vht_limit(
__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
/* On the IPQ8074 platform, VHT MCS rates 10 and 11 are enabled by default.
 * VHT MCS rates 10 and 11 are not supported by the 11ac standard,
 * so explicitly disable them in 11ac mode.
 */
arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK;
arg->tx_mcs_set |= IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11;
if ((arg->tx_mcs_set & IEEE80211_VHT_MCS_NOT_SUPPORTED) ==
IEEE80211_VHT_MCS_NOT_SUPPORTED)
arg->peer_vht_caps &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
/* TODO: Check */
arg->tx_max_mcs_nss = 0xFF;
if (arg->peer_phymode == MODE_11AC_VHT160 ||
arg->peer_phymode == MODE_11AC_VHT80_80) {
tx_nss = ath11k_get_nss_160mhz(ar, max_nss);
rx_nss = min(arg->peer_nss, tx_nss);
arg->peer_bw_rxnss_override = ATH11K_BW_NSS_MAP_ENABLE;
if (!rx_nss) {
ath11k_warn(ar->ab, "invalid max_nss\n");
return;
}
if (arg->peer_phymode == MODE_11AC_VHT160)
nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_160MHZ, rx_nss - 1);
else
nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_80_80MHZ, rx_nss - 1);
arg->peer_bw_rxnss_override |= nss_160;
}
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"vht peer %pM max_mpdu %d flags 0x%x nss_override 0x%x\n",
sta->addr, arg->peer_max_mpdu, arg->peer_flags,
arg->peer_bw_rxnss_override);
}
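/* Expand the 2-bit HE MCS support field for the given NSS into a bitmap
 * of individually supported MCS indices, e.g. IEEE80211_HE_MCS_SUPPORT_0_9
 * becomes BIT(10) - 1 = 0x3ff (MCS 0 through 9).
 */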
static int ath11k_mac_get_max_he_mcs_map(u16 mcs_map, int nss)
{
switch ((mcs_map >> (2 * nss)) & 0x3) {
case IEEE80211_HE_MCS_SUPPORT_0_7: return BIT(8) - 1;
case IEEE80211_HE_MCS_SUPPORT_0_9: return BIT(10) - 1;
case IEEE80211_HE_MCS_SUPPORT_0_11: return BIT(12) - 1;
}
return 0;
}
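/* Clamp each per-NSS 2-bit field of tx_mcs_set to the user-configured
 * he_mcs_limit mask: expand the field to an MCS bitmap, apply the mask,
 * then map the highest remaining index back to the nearest valid 2-bit
 * encoding.
 */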
static u16 ath11k_peer_assoc_h_he_limit(u16 tx_mcs_set,
const u16 he_mcs_limit[NL80211_HE_NSS_MAX])
{
int idx_limit;
int nss;
u16 mcs_map;
u16 mcs;
for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++) {
mcs_map = ath11k_mac_get_max_he_mcs_map(tx_mcs_set, nss) &
he_mcs_limit[nss];
if (mcs_map)
idx_limit = fls(mcs_map) - 1;
else
idx_limit = -1;
switch (idx_limit) {
case 0 ... 7:
mcs = IEEE80211_HE_MCS_SUPPORT_0_7;
break;
case 8:
case 9:
mcs = IEEE80211_HE_MCS_SUPPORT_0_9;
break;
case 10:
case 11:
mcs = IEEE80211_HE_MCS_SUPPORT_0_11;
break;
default:
WARN_ON(1);
fallthrough;
case -1:
mcs = IEEE80211_HE_MCS_NOT_SUPPORTED;
break;
}
tx_mcs_set &= ~(0x3 << (nss * 2));
tx_mcs_set |= mcs << (nss * 2);
}
return tx_mcs_set;
}
static bool
ath11k_peer_assoc_h_he_masked(const u16 *he_mcs_mask)
{
int nss;
for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++)
if (he_mcs_mask[nss])
return false;
return true;
}
static void ath11k_peer_assoc_h_he(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct peer_assoc_params *arg)
{
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct cfg80211_chan_def def;
const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
enum nl80211_band band;
u16 he_mcs_mask[NL80211_HE_NSS_MAX];
u8 max_nss, he_mcs;
u16 he_tx_mcs = 0, v = 0;
int i, he_nss, nss_idx;
bool user_rate_valid = true;
u32 rx_nss, tx_nss, nss_160;
u8 ampdu_factor;
/* Default to zero streams so an all-"not supported" MCS map cannot
 * leave rx_mcs_80/rx_mcs_160 uninitialized below.
 */
u8 rx_mcs_80 = 0, rx_mcs_160 = 0;
u16 mcs_160_map, mcs_80_map;
bool support_160;
if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
return;
if (!he_cap->has_he)
return;
band = def.chan->band;
memcpy(he_mcs_mask, arvif->bitrate_mask.control[band].he_mcs,
sizeof(he_mcs_mask));
if (ath11k_peer_assoc_h_he_masked(he_mcs_mask))
return;
arg->he_flag = true;
support_160 = !!(he_cap->he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G);
/* The supported HE-MCS and NSS set of the peer is the intersection of
 * the peer's he_cap and our own he_cap.
 */
mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
if (support_160) {
for (i = 7; i >= 0; i--) {
u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;
if (mcs_160 != IEEE80211_HE_MCS_NOT_SUPPORTED) {
rx_mcs_160 = i + 1;
break;
}
}
}
for (i = 7; i >= 0; i--) {
u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;
if (mcs_80 != IEEE80211_HE_MCS_NOT_SUPPORTED) {
rx_mcs_80 = i + 1;
break;
}
}
if (support_160)
max_nss = min(rx_mcs_80, rx_mcs_160);
else
max_nss = rx_mcs_80;
arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
memcpy_and_pad(&arg->peer_he_cap_macinfo,
sizeof(arg->peer_he_cap_macinfo),
he_cap->he_cap_elem.mac_cap_info,
sizeof(he_cap->he_cap_elem.mac_cap_info),
0);
memcpy_and_pad(&arg->peer_he_cap_phyinfo,
sizeof(arg->peer_he_cap_phyinfo),
he_cap->he_cap_elem.phy_cap_info,
sizeof(he_cap->he_cap_elem.phy_cap_info),
0);
arg->peer_he_ops = vif->bss_conf.he_oper.params;
/* the topmost byte is used to indicate BSS color info */
arg->peer_he_ops &= 0xffffff;
/* As per section 26.6.1 of IEEE 802.11ax Draft 5.0, if the Max A-MPDU
 * Length Exponent Extension in the HE MAC caps is zero, use
 * arg->peer_max_mpdu as calculated while parsing the VHT caps (if
 * present) or the HT caps (otherwise).
 *
 * For a non-zero Max A-MPDU Length Exponent Extension in the HE MAC
 * caps: if an HE STA sends both VHT and HE cap IEs in the assoc
 * request, use a MAX_AMPDU_LEN_FACTOR of 20 to calculate the max
 * A-MPDU length; if the STA sends HT and HE caps but no VHT cap, use
 * a MAX_AMPDU_LEN_FACTOR of 16 instead.
 */
ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3],
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
if (ampdu_factor) {
if (sta->deflink.vht_cap.vht_supported)
arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1;
else if (sta->deflink.ht_cap.ht_supported)
arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1;
}
if (he_cap->he_cap_elem.phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
int bit = 7;
int nss, ru;
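/* The PPE Thresholds field starts with a 3-bit NSS count and a 4-bit
 * RU index bitmask; after that, each active (NSS, RU) pair carries a
 * 6-bit PPET16/PPET8 value starting at bit 7. Reassemble each 6-bit
 * value and pack it into the per-NSS word expected by the firmware.
 */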
arg->peer_ppet.numss_m1 = he_cap->ppe_thres[0] &
IEEE80211_PPE_THRES_NSS_MASK;
arg->peer_ppet.ru_bit_mask =
(he_cap->ppe_thres[0] &
IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
for (nss = 0; nss <= arg->peer_ppet.numss_m1; nss++) {
for (ru = 0; ru < 4; ru++) {
u32 val = 0;
int i;
if ((arg->peer_ppet.ru_bit_mask & BIT(ru)) == 0)
continue;
for (i = 0; i < 6; i++) {
val >>= 1;
val |= ((he_cap->ppe_thres[bit / 8] >>
(bit % 8)) & 0x1) << 5;
bit++;
}
arg->peer_ppet.ppet16_ppet8_ru3_ru0[nss] |=
val << (ru * 6);
}
}
}
if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_RES)
arg->twt_responder = true;
if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ)
arg->twt_requester = true;
he_nss = ath11k_mac_max_he_nss(he_mcs_mask);
if (he_nss > sta->deflink.rx_nss) {
user_rate_valid = false;
for (nss_idx = sta->deflink.rx_nss - 1; nss_idx >= 0; nss_idx--) {
if (he_mcs_mask[nss_idx]) {
user_rate_valid = true;
break;
}
}
}
if (!user_rate_valid) {
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting he range mcs value to peer supported nss %d for peer %pM\n",
sta->deflink.rx_nss, sta->addr);
he_mcs_mask[sta->deflink.rx_nss - 1] = he_mcs_mask[he_nss - 1];
}
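/* Populate the rx/tx MCS sets from the widest bandwidth the peer uses
 * down to 80 MHz; the fallthrough is deliberate so a 160 MHz peer also
 * reports its 80 MHz set.
 */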
switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (he_cap->he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80);
v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80);
arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
arg->peer_he_mcs_count++;
he_tx_mcs = v;
}
v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160);
v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
arg->peer_he_mcs_count++;
if (!he_tx_mcs)
he_tx_mcs = v;
fallthrough;
default:
v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80);
v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
arg->peer_he_mcs_count++;
if (!he_tx_mcs)
he_tx_mcs = v;
break;
}
/* Calculate peer NSS capability from HE capabilities if STA
* supports HE.
*/
for (i = 0, max_nss = 0; i < NL80211_HE_NSS_MAX; i++) {
he_mcs = he_tx_mcs >> (2 * i) & 3;
/* In case of fixed rates, the MCS range in he_tx_mcs might be
 * unsupported while he_mcs_mask is set, so check either of them
 * to find the NSS.
 */
if (he_mcs != IEEE80211_HE_MCS_NOT_SUPPORTED ||
he_mcs_mask[i])
max_nss = i + 1;
}
arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
if (arg->peer_phymode == MODE_11AX_HE160 ||
arg->peer_phymode == MODE_11AX_HE80_80) {
tx_nss = ath11k_get_nss_160mhz(ar, max_nss);
rx_nss = min(arg->peer_nss, tx_nss);
arg->peer_bw_rxnss_override = ATH11K_BW_NSS_MAP_ENABLE;
if (!rx_nss) {
ath11k_warn(ar->ab, "invalid max_nss\n");
return;
}
if (arg->peer_phymode == MODE_11AX_HE160)
nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_160MHZ, rx_nss - 1);
else
nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_80_80MHZ, rx_nss - 1);
arg->peer_bw_rxnss_override |= nss_160;
}
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"he peer %pM nss %d mcs cnt %d nss_override 0x%x\n",
sta->addr, arg->peer_nss,
arg->peer_he_mcs_count,
arg->peer_bw_rxnss_override);
}
static void ath11k_peer_assoc_h_he_6ghz(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct peer_assoc_params *arg)
{
const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
struct cfg80211_chan_def def;
enum nl80211_band band;
u8 ampdu_factor;
if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
return;
band = def.chan->band;
if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->deflink.he_6ghz_capa.capa)
return;
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
arg->bw_40 = true;
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
arg->bw_80 = true;
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
arg->bw_160 = true;
arg->peer_he_caps_6ghz = le16_to_cpu(sta->deflink.he_6ghz_capa.capa);
arg->peer_mpdu_density =
ath11k_parse_mpdudensity(FIELD_GET(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START,
arg->peer_he_caps_6ghz));
/* From IEEE Std 802.11ax-2021 - Section 10.12.2: An HE STA shall be capable of
* receiving A-MPDU where the A-MPDU pre-EOF padding length is up to the value
* indicated by the Maximum A-MPDU Length Exponent Extension field in the HE
* Capabilities element and the Maximum A-MPDU Length Exponent field in HE 6 GHz
* Band Capabilities element in the 6 GHz band.
*
* Here, the Max A-MPDU Length Exponent Extension is extracted from the
* HE caps and added to the Maximum A-MPDU Length Exponent from the HE
* 6 GHz Band Capabilities element.
*/
ampdu_factor = FIELD_GET(IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK,
he_cap->he_cap_elem.mac_cap_info[3]) +
FIELD_GET(IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP,
arg->peer_he_caps_6ghz);
arg->peer_max_mpdu = (1u << (IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1;
}
static void ath11k_peer_assoc_h_smps(struct ieee80211_sta *sta,
struct peer_assoc_params *arg)
{
const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
int smps;
if (!ht_cap->ht_supported && !sta->deflink.he_6ghz_capa.capa)
return;
if (ht_cap->ht_supported) {
smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
} else {
smps = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_SM_PS);
}
switch (smps) {
case WLAN_HT_CAP_SM_PS_STATIC:
arg->static_mimops_flag = true;
break;
case WLAN_HT_CAP_SM_PS_DYNAMIC:
arg->dynamic_mimops_flag = true;
break;
case WLAN_HT_CAP_SM_PS_DISABLED:
arg->spatial_mux_flag = true;
break;
default:
break;
}
}
static void ath11k_peer_assoc_h_qos(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct peer_assoc_params *arg)
{
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_AP:
if (sta->wme) {
/* TODO: Check WME vs QoS */
arg->is_wme_set = true;
arg->qos_flag = true;
}
if (sta->wme && sta->uapsd_queues) {
/* TODO: Check WME vs QoS */
arg->is_wme_set = true;
arg->apsd_flag = true;
arg->peer_rate_caps |= WMI_HOST_RC_UAPSD_FLAG;
}
break;
case WMI_VDEV_TYPE_STA:
if (sta->wme) {
arg->is_wme_set = true;
arg->qos_flag = true;
}
break;
default:
break;
}
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "peer %pM qos %d\n",
sta->addr, arg->qos_flag);
}
static int ath11k_peer_assoc_qos_ap(struct ath11k *ar,
struct ath11k_vif *arvif,
struct ieee80211_sta *sta)
{
struct ap_ps_params params;
u32 max_sp;
u32 uapsd;
int ret;
lockdep_assert_held(&ar->conf_mutex);
params.vdev_id = arvif->vdev_id;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "uapsd_queues 0x%x max_sp %d\n",
sta->uapsd_queues, sta->max_sp);
uapsd = 0;
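/* Map the u-APSD AC bits from mac80211 onto the firmware AC numbering:
 * AC3 = voice, AC2 = video, AC1 = background, AC0 = best effort.
 */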
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
max_sp = 0;
if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
max_sp = sta->max_sp;
params.param = WMI_AP_PS_PEER_PARAM_UAPSD;
params.value = uapsd;
ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
if (ret)
goto err;
params.param = WMI_AP_PS_PEER_PARAM_MAX_SP;
params.value = max_sp;
ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
if (ret)
goto err;
/* TODO revisit during testing */
params.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE;
params.value = DISABLE_SIFS_RESPONSE_TRIGGER;
ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
if (ret)
goto err;
params.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD;
params.value = DISABLE_SIFS_RESPONSE_TRIGGER;
ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
if (ret)
goto err;
return 0;
err:
ath11k_warn(ar->ab, "failed to set ap ps peer param %d for vdev %i: %d\n",
params.param, arvif->vdev_id, ret);
return ret;
}
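/* The 2 GHz rate bitmap starts with the CCK rates; shifting out everything
 * below the first OFDM index leaves a non-zero value iff the station
 * supports at least one OFDM rate.
 */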
static bool ath11k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
{
return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >>
ATH11K_MAC_FIRST_OFDM_RATE_IDX;
}
static enum wmi_phy_mode ath11k_mac_get_phymode_vht(struct ath11k *ar,
struct ieee80211_sta *sta)
{
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
switch (sta->deflink.vht_cap.cap &
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
return MODE_11AC_VHT160;
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
return MODE_11AC_VHT80_80;
default:
/* not sure if this is a valid case? */
return MODE_11AC_VHT160;
}
}
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11AC_VHT80;
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11AC_VHT40;
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11AC_VHT20;
return MODE_UNKNOWN;
}
static enum wmi_phy_mode ath11k_mac_get_phymode_he(struct ath11k *ar,
struct ieee80211_sta *sta)
{
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
return MODE_11AX_HE160;
else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
return MODE_11AX_HE80_80;
/* not sure if this is a valid case? */
return MODE_11AX_HE160;
}
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11AX_HE80;
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11AX_HE40;
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11AX_HE20;
return MODE_UNKNOWN;
}
static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct peer_assoc_params *arg)
{
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct cfg80211_chan_def def;
enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
const u16 *he_mcs_mask;
enum wmi_phy_mode phymode = MODE_UNKNOWN;
if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
return;
band = def.chan->band;
ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
switch (band) {
case NL80211_BAND_2GHZ:
if (sta->deflink.he_cap.has_he &&
!ath11k_peer_assoc_h_he_masked(he_mcs_mask)) {
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
phymode = MODE_11AX_HE80_2G;
else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AX_HE40_2G;
else
phymode = MODE_11AX_HE20_2G;
} else if (sta->deflink.vht_cap.vht_supported &&
!ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AC_VHT40;
else
phymode = MODE_11AC_VHT20;
} else if (sta->deflink.ht_cap.ht_supported &&
!ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11NG_HT40;
else
phymode = MODE_11NG_HT20;
} else if (ath11k_mac_sta_has_ofdm_only(sta)) {
phymode = MODE_11G;
} else {
phymode = MODE_11B;
}
break;
case NL80211_BAND_5GHZ:
case NL80211_BAND_6GHZ:
/* Check HE first */
if (sta->deflink.he_cap.has_he &&
!ath11k_peer_assoc_h_he_masked(he_mcs_mask)) {
phymode = ath11k_mac_get_phymode_he(ar, sta);
} else if (sta->deflink.vht_cap.vht_supported &&
!ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
phymode = ath11k_mac_get_phymode_vht(ar, sta);
} else if (sta->deflink.ht_cap.ht_supported &&
!ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40)
phymode = MODE_11NA_HT40;
else
phymode = MODE_11NA_HT20;
} else {
phymode = MODE_11A;
}
break;
default:
break;
}
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "peer %pM phymode %s\n",
sta->addr, ath11k_wmi_phymode_str(phymode));
arg->peer_phymode = phymode;
WARN_ON(phymode == MODE_UNKNOWN);
}
static void ath11k_peer_assoc_prepare(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct peer_assoc_params *arg,
bool reassoc)
{
struct ath11k_sta *arsta;
lockdep_assert_held(&ar->conf_mutex);
arsta = (struct ath11k_sta *)sta->drv_priv;
memset(arg, 0, sizeof(*arg));
reinit_completion(&ar->peer_assoc_done);
arg->peer_new_assoc = !reassoc;
ath11k_peer_assoc_h_basic(ar, vif, sta, arg);
ath11k_peer_assoc_h_crypto(ar, vif, sta, arg);
ath11k_peer_assoc_h_rates(ar, vif, sta, arg);
ath11k_peer_assoc_h_phymode(ar, vif, sta, arg);
ath11k_peer_assoc_h_ht(ar, vif, sta, arg);
ath11k_peer_assoc_h_vht(ar, vif, sta, arg);
ath11k_peer_assoc_h_he(ar, vif, sta, arg);
ath11k_peer_assoc_h_he_6ghz(ar, vif, sta, arg);
ath11k_peer_assoc_h_qos(ar, vif, sta, arg);
ath11k_peer_assoc_h_smps(sta, arg);
arsta->peer_nss = arg->peer_nss;
/* TODO: amsdu_disable req? */
}
static int ath11k_setup_peer_smps(struct ath11k *ar, struct ath11k_vif *arvif,
const u8 *addr,
const struct ieee80211_sta_ht_cap *ht_cap,
u16 he_6ghz_capa)
{
int smps;
if (!ht_cap->ht_supported && !he_6ghz_capa)
return 0;
if (ht_cap->ht_supported) {
smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
} else {
smps = FIELD_GET(IEEE80211_HE_6GHZ_CAP_SM_PS, he_6ghz_capa);
}
if (smps >= ARRAY_SIZE(ath11k_smps_map))
return -EINVAL;
return ath11k_wmi_set_peer_param(ar, addr, arvif->vdev_id,
WMI_PEER_MIMO_PS_STATE,
ath11k_smps_map[smps]);
}
static bool ath11k_mac_set_he_txbf_conf(struct ath11k_vif *arvif)
{
struct ath11k *ar = arvif->ar;
u32 param, value;
int ret;
if (!arvif->vif->bss_conf.he_support)
return true;
param = WMI_VDEV_PARAM_SET_HEMU_MODE;
value = 0;
if (arvif->vif->bss_conf.he_su_beamformer) {
value |= FIELD_PREP(HE_MODE_SU_TX_BFER, HE_SU_BFER_ENABLE);
if (arvif->vif->bss_conf.he_mu_beamformer &&
arvif->vdev_type == WMI_VDEV_TYPE_AP)
value |= FIELD_PREP(HE_MODE_MU_TX_BFER, HE_MU_BFER_ENABLE);
}
if (arvif->vif->type != NL80211_IFTYPE_MESH_POINT) {
value |= FIELD_PREP(HE_MODE_DL_OFDMA, HE_DL_MUOFDMA_ENABLE) |
FIELD_PREP(HE_MODE_UL_OFDMA, HE_UL_MUOFDMA_ENABLE);
if (arvif->vif->bss_conf.he_full_ul_mumimo)
value |= FIELD_PREP(HE_MODE_UL_MUMIMO, HE_UL_MUMIMO_ENABLE);
if (arvif->vif->bss_conf.he_su_beamformee)
value |= FIELD_PREP(HE_MODE_SU_TX_BFEE, HE_SU_BFEE_ENABLE);
}
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, value);
if (ret) {
ath11k_warn(ar->ab, "failed to set vdev %d HE MU mode: %d\n",
arvif->vdev_id, ret);
return false;
}
param = WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE;
value = FIELD_PREP(HE_VHT_SOUNDING_MODE, HE_VHT_SOUNDING_MODE_ENABLE) |
FIELD_PREP(HE_TRIG_NONTRIG_SOUNDING_MODE,
HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE);
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param, value);
if (ret) {
ath11k_warn(ar->ab, "failed to set vdev %d sounding mode: %d\n",
arvif->vdev_id, ret);
return false;
}
return true;
}
static bool ath11k_mac_vif_recalc_sta_he_txbf(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta_he_cap *he_cap)
{
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ieee80211_he_cap_elem he_cap_elem = {0};
struct ieee80211_sta_he_cap *cap_band = NULL;
struct cfg80211_chan_def def;
u32 param = WMI_VDEV_PARAM_SET_HEMU_MODE;
u32 hemode = 0;
int ret;
if (!vif->bss_conf.he_support)
return true;
if (vif->type != NL80211_IFTYPE_STATION)
return false;
if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
return false;
if (def.chan->band == NL80211_BAND_2GHZ)
cap_band = &ar->mac.iftype[NL80211_BAND_2GHZ][vif->type].he_cap;
else
cap_band = &ar->mac.iftype[NL80211_BAND_5GHZ][vif->type].he_cap;
memcpy(&he_cap_elem, &cap_band->he_cap_elem, sizeof(he_cap_elem));
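/* Enable a beamformee role only when our own capability supports it and
 * the AP advertises the matching beamformer role.
 */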
if (HECAP_PHY_SUBFME_GET(he_cap_elem.phy_cap_info)) {
if (HECAP_PHY_SUBFMR_GET(he_cap->he_cap_elem.phy_cap_info))
hemode |= FIELD_PREP(HE_MODE_SU_TX_BFEE, HE_SU_BFEE_ENABLE);
if (HECAP_PHY_MUBFMR_GET(he_cap->he_cap_elem.phy_cap_info))
hemode |= FIELD_PREP(HE_MODE_MU_TX_BFEE, HE_MU_BFEE_ENABLE);
}
if (vif->type != NL80211_IFTYPE_MESH_POINT) {
hemode |= FIELD_PREP(HE_MODE_DL_OFDMA, HE_DL_MUOFDMA_ENABLE) |
FIELD_PREP(HE_MODE_UL_OFDMA, HE_UL_MUOFDMA_ENABLE);
if (HECAP_PHY_ULMUMIMO_GET(he_cap_elem.phy_cap_info) &&
    HECAP_PHY_ULMUMIMO_GET(he_cap->he_cap_elem.phy_cap_info))
hemode |= FIELD_PREP(HE_MODE_UL_MUMIMO,
HE_UL_MUMIMO_ENABLE);
if (FIELD_GET(HE_MODE_MU_TX_BFEE, hemode))
hemode |= FIELD_PREP(HE_MODE_SU_TX_BFEE, HE_SU_BFEE_ENABLE);
if (FIELD_GET(HE_MODE_MU_TX_BFER, hemode))
hemode |= FIELD_PREP(HE_MODE_SU_TX_BFER, HE_SU_BFER_ENABLE);
}
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, hemode);
if (ret) {
ath11k_warn(ar->ab, "failed to submit vdev param txbf 0x%x: %d\n",
hemode, ret);
return false;
}
return true;
}
static void ath11k_bss_assoc(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf)
{
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct peer_assoc_params peer_arg;
struct ieee80211_sta *ap_sta;
struct ath11k_peer *peer;
bool is_auth = false;
struct ieee80211_sta_he_cap he_cap;
int ret;
lockdep_assert_held(&ar->conf_mutex);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %i assoc bssid %pM aid %d\n",
arvif->vdev_id, arvif->bssid, arvif->aid);
rcu_read_lock();
ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
if (!ap_sta) {
ath11k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n",
bss_conf->bssid, arvif->vdev_id);
rcu_read_unlock();
return;
}
/* he_cap here is updated at assoc success for sta mode only */
he_cap = ap_sta->deflink.he_cap;
ath11k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg, false);
rcu_read_unlock();
peer_arg.is_assoc = true;
ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (ret) {
ath11k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
bss_conf->bssid, arvif->vdev_id, ret);
return;
}
if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
bss_conf->bssid, arvif->vdev_id);
return;
}
ret = ath11k_setup_peer_smps(ar, arvif, bss_conf->bssid,
&ap_sta->deflink.ht_cap,
le16_to_cpu(ap_sta->deflink.he_6ghz_capa.capa));
if (ret) {
ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
return;
}
if (!ath11k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) {
ath11k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
arvif->vdev_id, bss_conf->bssid);
return;
}
WARN_ON(arvif->is_up);
arvif->aid = vif->cfg.aid;
ether_addr_copy(arvif->bssid, bss_conf->bssid);
ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid,
NULL, 0, 0);
if (ret) {
ath11k_warn(ar->ab, "failed to set vdev %d up: %d\n",
arvif->vdev_id, ret);
return;
}
arvif->is_up = true;
arvif->rekey_data.enable_offload = false;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"vdev %d up (associated) bssid %pM aid %d\n",
arvif->vdev_id, bss_conf->bssid, vif->cfg.aid);
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find(ar->ab, arvif->vdev_id, arvif->bssid);
if (peer && peer->is_authorized)
is_auth = true;
spin_unlock_bh(&ar->ab->base_lock);
if (is_auth) {
ret = ath11k_wmi_set_peer_param(ar, arvif->bssid,
arvif->vdev_id,
WMI_PEER_AUTHORIZE,
1);
if (ret)
ath11k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret);
}
ret = ath11k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
&bss_conf->he_obss_pd);
if (ret)
ath11k_warn(ar->ab, "failed to set vdev %i OBSS PD parameters: %d\n",
arvif->vdev_id, ret);
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
WMI_VDEV_PARAM_DTIM_POLICY,
WMI_DTIM_POLICY_STICK);
if (ret)
ath11k_warn(ar->ab, "failed to set vdev %d dtim policy: %d\n",
arvif->vdev_id, ret);
ath11k_mac_11d_scan_stop_all(ar->ab);
}
static void ath11k_bss_disassoc(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
int ret;
lockdep_assert_held(&ar->conf_mutex);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %i disassoc bssid %pM\n",
arvif->vdev_id, arvif->bssid);
ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id);
if (ret)
ath11k_warn(ar->ab, "failed to down vdev %i: %d\n",
arvif->vdev_id, ret);
arvif->is_up = false;
memset(&arvif->rekey_data, 0, sizeof(arvif->rekey_data));
cancel_delayed_work_sync(&arvif->connection_loss_work);
}
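/* Translate a legacy bitrate (in 100 kbps units, as reported by mac80211)
 * into the firmware rate code, combining the hardware rate index with the
 * CCK or OFDM preamble type.
 */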
static int ath11k_mac_get_rate_hw_value(int bitrate)
{
u32 preamble;
u16 hw_value;
int rate;
size_t i;
if (ath11k_mac_bitrate_is_cck(bitrate))
preamble = WMI_RATE_PREAMBLE_CCK;
else
preamble = WMI_RATE_PREAMBLE_OFDM;
for (i = 0; i < ARRAY_SIZE(ath11k_legacy_rates); i++) {
if (ath11k_legacy_rates[i].bitrate != bitrate)
continue;
hw_value = ath11k_legacy_rates[i].hw_value;
rate = ATH11K_HW_RATE_CODE(hw_value, 0, preamble);
return rate;
}
return -EINVAL;
}
static void ath11k_recalculate_mgmt_rate(struct ath11k *ar,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *def)
{
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
const struct ieee80211_supported_band *sband;
u8 basic_rate_idx;
int hw_rate_code;
u32 vdev_param;
u16 bitrate;
int ret;
lockdep_assert_held(&ar->conf_mutex);
sband = ar->hw->wiphy->bands[def->chan->band];
basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
bitrate = sband->bitrates[basic_rate_idx].bitrate;
hw_rate_code = ath11k_mac_get_rate_hw_value(bitrate);
if (hw_rate_code < 0) {
ath11k_warn(ar->ab, "bitrate not supported %d\n", bitrate);
return;
}
vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
hw_rate_code);
if (ret)
ath11k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
/* For WCN6855, firmware will clear this param when vdev starts, hence
* cache it here so that we can reconfigure it once vdev starts.
*/
ar->hw_rate_code = hw_rate_code;
vdev_param = WMI_VDEV_PARAM_BEACON_RATE;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
hw_rate_code);
if (ret)
ath11k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret);
}
static int ath11k_mac_fils_discovery(struct ath11k_vif *arvif,
struct ieee80211_bss_conf *info)
{
struct ath11k *ar = arvif->ar;
struct sk_buff *tmpl;
int ret;
u32 interval;
bool unsol_bcast_probe_resp_enabled = false;
if (info->fils_discovery.max_interval) {
interval = info->fils_discovery.max_interval;
tmpl = ieee80211_get_fils_discovery_tmpl(ar->hw, arvif->vif);
if (tmpl)
ret = ath11k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id,
tmpl);
} else if (info->unsol_bcast_probe_resp_interval) {
unsol_bcast_probe_resp_enabled = true;
interval = info->unsol_bcast_probe_resp_interval;
tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(ar->hw,
arvif->vif);
if (tmpl)
ret = ath11k_wmi_probe_resp_tmpl(ar, arvif->vdev_id,
tmpl);
} else { /* Disable */
return ath11k_wmi_fils_discovery(ar, arvif->vdev_id, 0, false);
}
if (!tmpl) {
ath11k_warn(ar->ab,
"mac vdev %i failed to retrieve %s template\n",
arvif->vdev_id, (unsol_bcast_probe_resp_enabled ?
"unsolicited broadcast probe response" :
"FILS discovery"));
return -EPERM;
}
kfree_skb(tmpl);
if (!ret)
ret = ath11k_wmi_fils_discovery(ar, arvif->vdev_id, interval,
unsol_bcast_probe_resp_enabled);
return ret;
}
static int ath11k_mac_config_obss_pd(struct ath11k *ar,
struct ieee80211_he_obss_pd *he_obss_pd)
{
u32 bitmap[2], param_id, param_val, pdev_id;
int ret;
s8 non_srg_th = 0, srg_th = 0;
pdev_id = ar->pdev->pdev_id;
/* Set and enable SRG/non-SRG OBSS PD Threshold */
param_id = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD;
if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) {
ret = ath11k_wmi_pdev_set_param(ar, param_id, 0, pdev_id);
if (ret)
ath11k_warn(ar->ab,
"failed to set obss_pd_threshold for pdev: %u\n",
pdev_id);
return ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"obss pd sr_ctrl %x non_srg_thres %u srg_max %u\n",
he_obss_pd->sr_ctrl, he_obss_pd->non_srg_max_offset,
he_obss_pd->max_offset);
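/* The thresholds are offsets from ATH11K_OBSS_PD_MAX_THRESHOLD (-82 dBm,
 * the minimum OBSS PD sensitivity defined by 802.11ax); when no non-SRG
 * offset is advertised, fall back to the maximum allowed non-SRG
 * threshold.
 */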
param_val = 0;
if (he_obss_pd->sr_ctrl &
IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED) {
non_srg_th = ATH11K_OBSS_PD_MAX_THRESHOLD;
} else {
if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
non_srg_th = (ATH11K_OBSS_PD_MAX_THRESHOLD +
he_obss_pd->non_srg_max_offset);
else
non_srg_th = ATH11K_OBSS_PD_NON_SRG_MAX_THRESHOLD;
param_val |= ATH11K_OBSS_PD_NON_SRG_EN;
}
if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) {
srg_th = ATH11K_OBSS_PD_MAX_THRESHOLD + he_obss_pd->max_offset;
param_val |= ATH11K_OBSS_PD_SRG_EN;
}
if (test_bit(WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT,
ar->ab->wmi_ab.svc_map)) {
param_val |= ATH11K_OBSS_PD_THRESHOLD_IN_DBM;
param_val |= FIELD_PREP(GENMASK(15, 8), srg_th);
} else {
non_srg_th -= ATH11K_DEFAULT_NOISE_FLOOR;
/* SRG not supported and threshold in dB */
param_val &= ~(ATH11K_OBSS_PD_SRG_EN |
ATH11K_OBSS_PD_THRESHOLD_IN_DBM);
}
param_val |= (non_srg_th & GENMASK(7, 0));
ret = ath11k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id);
if (ret) {
ath11k_warn(ar->ab,
"failed to set obss_pd_threshold for pdev: %u\n",
pdev_id);
return ret;
}
/* Enable OBSS PD for all access category */
param_id = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_PER_AC;
param_val = 0xf;
ret = ath11k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id);
if (ret) {
ath11k_warn(ar->ab,
"failed to set obss_pd_per_ac for pdev: %u\n",
pdev_id);
return ret;
}
/* Set SR Prohibit */
param_id = WMI_PDEV_PARAM_ENABLE_SR_PROHIBIT;
param_val = !!(he_obss_pd->sr_ctrl &
IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED);
ret = ath11k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id);
if (ret) {
ath11k_warn(ar->ab, "failed to set sr_prohibit for pdev: %u\n",
pdev_id);
return ret;
}
if (!test_bit(WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT,
ar->ab->wmi_ab.svc_map))
return 0;
/* Set SRG BSS Color Bitmap */
memcpy(bitmap, he_obss_pd->bss_color_bitmap, sizeof(bitmap));
ret = ath11k_wmi_pdev_set_srg_bss_color_bitmap(ar, bitmap);
if (ret) {
ath11k_warn(ar->ab,
"failed to set bss_color_bitmap for pdev: %u\n",
pdev_id);
return ret;
}
/* Set SRG Partial BSSID Bitmap */
memcpy(bitmap, he_obss_pd->partial_bssid_bitmap, sizeof(bitmap));
ret = ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(ar, bitmap);
if (ret) {
ath11k_warn(ar->ab,
"failed to set partial_bssid_bitmap for pdev: %u\n",
pdev_id);
return ret;
}
memset(bitmap, 0xff, sizeof(bitmap));
/* Enable all BSS Colors for SRG */
ret = ath11k_wmi_pdev_srg_obss_color_enable_bitmap(ar, bitmap);
if (ret) {
ath11k_warn(ar->ab,
"failed to set srg_color_en_bitmap pdev: %u\n",
pdev_id);
return ret;
}
/* Enable all partial BSSID mask for SRG */
ret = ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(ar, bitmap);
if (ret) {
ath11k_warn(ar->ab,
"failed to set srg_bssid_en_bitmap pdev: %u\n",
pdev_id);
return ret;
}
/* Enable all BSS Colors for non-SRG */
ret = ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(ar, bitmap);
if (ret) {
ath11k_warn(ar->ab,
"failed to set non_srg_color_en_bitmap pdev: %u\n",
pdev_id);
return ret;
}
/* Enable all partial BSSID mask for non-SRG */
ret = ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(ar, bitmap);
if (ret) {
ath11k_warn(ar->ab,
"failed to set non_srg_bssid_en_bitmap pdev: %u\n",
pdev_id);
return ret;
}
return 0;
}
static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
u64 changed)
{
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct cfg80211_chan_def def;
u32 param_id, param_value;
enum nl80211_band band;
u32 vdev_param;
int mcast_rate;
u32 preamble;
u16 hw_value;
u16 bitrate;
int ret = 0;
u8 rateidx;
u32 rate, param;
u32 ipv4_cnt;
mutex_lock(&ar->conf_mutex);
if (changed & BSS_CHANGED_BEACON_INT) {
arvif->beacon_interval = info->beacon_int;
param_id = WMI_VDEV_PARAM_BEACON_INTERVAL;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id,
arvif->beacon_interval);
if (ret)
ath11k_warn(ar->ab, "Failed to set beacon interval for VDEV: %d\n",
arvif->vdev_id);
else
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"Beacon interval: %d set for VDEV: %d\n",
arvif->beacon_interval, arvif->vdev_id);
}
if (changed & BSS_CHANGED_BEACON) {
param_id = WMI_PDEV_PARAM_BEACON_TX_MODE;
param_value = WMI_BEACON_STAGGERED_MODE;
ret = ath11k_wmi_pdev_set_param(ar, param_id,
param_value, ar->pdev->pdev_id);
if (ret)
ath11k_warn(ar->ab, "Failed to set beacon mode for VDEV: %d\n",
arvif->vdev_id);
else
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"Set staggered beacon mode for VDEV: %d\n",
arvif->vdev_id);
if (!arvif->do_not_send_tmpl || !arvif->bcca_zero_sent) {
ret = ath11k_mac_setup_bcn_tmpl(arvif);
if (ret)
ath11k_warn(ar->ab, "failed to update bcn template: %d\n",
ret);
}
if (arvif->bcca_zero_sent)
arvif->do_not_send_tmpl = true;
else
arvif->do_not_send_tmpl = false;
if (vif->bss_conf.he_support) {
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
WMI_VDEV_PARAM_BA_MODE,
WMI_BA_MODE_BUFFER_SIZE_256);
if (ret)
ath11k_warn(ar->ab,
"failed to set BA BUFFER SIZE 256 for vdev: %d\n",
arvif->vdev_id);
else
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"Set BA BUFFER SIZE 256 for VDEV: %d\n",
arvif->vdev_id);
}
}
if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
arvif->dtim_period = info->dtim_period;
param_id = WMI_VDEV_PARAM_DTIM_PERIOD;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id,
arvif->dtim_period);
if (ret)
ath11k_warn(ar->ab, "Failed to set dtim period for VDEV %d: %i\n",
arvif->vdev_id, ret);
else
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"DTIM period: %d set for VDEV: %d\n",
arvif->dtim_period, arvif->vdev_id);
}
if (changed & BSS_CHANGED_SSID &&
vif->type == NL80211_IFTYPE_AP) {
arvif->u.ap.ssid_len = vif->cfg.ssid_len;
if (vif->cfg.ssid_len)
memcpy(arvif->u.ap.ssid, vif->cfg.ssid,
vif->cfg.ssid_len);
arvif->u.ap.hidden_ssid = info->hidden_ssid;
}
if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
ether_addr_copy(arvif->bssid, info->bssid);
if (changed & BSS_CHANGED_BEACON_ENABLED) {
if (info->enable_beacon)
ath11k_mac_set_he_txbf_conf(arvif);
ath11k_control_beaconing(arvif, info);
if (arvif->is_up && vif->bss_conf.he_support &&
vif->bss_conf.he_oper.params) {
param_id = WMI_VDEV_PARAM_HEOPS_0_31;
param_value = vif->bss_conf.he_oper.params;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, param_value);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"he oper param: %x set for VDEV: %d\n",
param_value, arvif->vdev_id);
if (ret)
ath11k_warn(ar->ab, "Failed to set he oper params %x for VDEV %d: %i\n",
param_value, arvif->vdev_id, ret);
}
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
u32 cts_prot;
cts_prot = !!(info->use_cts_prot);
param_id = WMI_VDEV_PARAM_PROTECTION_MODE;
if (arvif->is_started) {
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, cts_prot);
if (ret)
ath11k_warn(ar->ab, "Failed to set CTS prot for VDEV: %d\n",
arvif->vdev_id);
else
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Set CTS prot: %d for VDEV: %d\n",
cts_prot, arvif->vdev_id);
} else {
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "defer protection mode setup, vdev is not ready yet\n");
}
}
if (changed & BSS_CHANGED_ERP_SLOT) {
u32 slottime;
if (info->use_short_slot)
slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
else
slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
param_id = WMI_VDEV_PARAM_SLOT_TIME;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, slottime);
if (ret)
ath11k_warn(ar->ab, "Failed to set erp slot for VDEV: %d\n",
arvif->vdev_id);
else
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"Set slottime: %d for VDEV: %d\n",
slottime, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
u32 preamble;
if (info->use_short_preamble)
preamble = WMI_VDEV_PREAMBLE_SHORT;
else
preamble = WMI_VDEV_PREAMBLE_LONG;
param_id = WMI_VDEV_PARAM_PREAMBLE;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, preamble);
if (ret)
ath11k_warn(ar->ab, "Failed to set preamble for VDEV: %d\n",
arvif->vdev_id);
else
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"Set preamble: %d for VDEV: %d\n",
preamble, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ASSOC) {
if (vif->cfg.assoc)
ath11k_bss_assoc(hw, vif, info);
else
ath11k_bss_disassoc(hw, vif);
}
if (changed & BSS_CHANGED_TXPOWER) {
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev_id %i txpower %d\n",
arvif->vdev_id, info->txpower);
arvif->txpower = info->txpower;
ath11k_mac_txpower_recalc(ar);
}
if (changed & BSS_CHANGED_PS &&
ar->ab->hw_params.supports_sta_ps) {
arvif->ps = vif->cfg.ps;
ret = ath11k_mac_config_ps(ar);
if (ret)
ath11k_warn(ar->ab, "failed to setup ps on vdev %i: %d\n",
arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_MCAST_RATE &&
!ath11k_mac_vif_chan(arvif->vif, &def)) {
band = def.chan->band;
mcast_rate = vif->bss_conf.mcast_rate[band];
if (mcast_rate > 0)
rateidx = mcast_rate - 1;
else
rateidx = ffs(vif->bss_conf.basic_rates) - 1;
if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP)
rateidx += ATH11K_MAC_FIRST_OFDM_RATE_IDX;
bitrate = ath11k_legacy_rates[rateidx].bitrate;
hw_value = ath11k_legacy_rates[rateidx].hw_value;
if (ath11k_mac_bitrate_is_cck(bitrate))
preamble = WMI_RATE_PREAMBLE_CCK;
else
preamble = WMI_RATE_PREAMBLE_OFDM;
rate = ATH11K_HW_RATE_CODE(hw_value, 0, preamble);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"vdev %d mcast_rate %x\n",
arvif->vdev_id, rate);
vdev_param = WMI_VDEV_PARAM_MCAST_DATA_RATE;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
vdev_param, rate);
if (ret)
ath11k_warn(ar->ab,
"failed to set mcast rate on vdev %i: %d\n",
arvif->vdev_id, ret);
vdev_param = WMI_VDEV_PARAM_BCAST_DATA_RATE;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
vdev_param, rate);
if (ret)
ath11k_warn(ar->ab,
"failed to set bcast rate on vdev %i: %d\n",
arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_BASIC_RATES &&
!ath11k_mac_vif_chan(arvif->vif, &def))
ath11k_recalculate_mgmt_rate(ar, vif, &def);
if (changed & BSS_CHANGED_TWT) {
struct wmi_twt_enable_params twt_params = {0};
if (info->twt_requester || info->twt_responder) {
ath11k_wmi_fill_default_twt_params(&twt_params);
ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id,
&twt_params);
} else {
ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
}
}
if (changed & BSS_CHANGED_HE_OBSS_PD)
ath11k_mac_config_obss_pd(ar, &info->he_obss_pd);
if (changed & BSS_CHANGED_HE_BSS_COLOR) {
if (vif->type == NL80211_IFTYPE_AP) {
ret = ath11k_wmi_send_obss_color_collision_cfg_cmd(
ar, arvif->vdev_id, info->he_bss_color.color,
ATH11K_BSS_COLOR_COLLISION_DETECTION_AP_PERIOD_MS,
info->he_bss_color.enabled);
if (ret)
ath11k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
arvif->vdev_id, ret);
param_id = WMI_VDEV_PARAM_BSS_COLOR;
if (info->he_bss_color.enabled)
param_value = info->he_bss_color.color <<
IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET;
else
param_value = IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id,
param_value);
if (ret)
ath11k_warn(ar->ab,
"failed to set bss color param on vdev %i: %d\n",
arvif->vdev_id, ret);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"bss color param 0x%x set on vdev %i\n",
param_value, arvif->vdev_id);
} else if (vif->type == NL80211_IFTYPE_STATION) {
ret = ath11k_wmi_send_bss_color_change_enable_cmd(ar,
arvif->vdev_id,
1);
if (ret)
ath11k_warn(ar->ab, "failed to enable bss color change on vdev %i: %d\n",
arvif->vdev_id, ret);
ret = ath11k_wmi_send_obss_color_collision_cfg_cmd(
ar, arvif->vdev_id, 0,
ATH11K_BSS_COLOR_COLLISION_DETECTION_STA_PERIOD_MS, 1);
if (ret)
ath11k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
arvif->vdev_id, ret);
}
}
if (changed & BSS_CHANGED_FTM_RESPONDER &&
arvif->ftm_responder != info->ftm_responder &&
test_bit(WMI_TLV_SERVICE_RTT, ar->ab->wmi_ab.svc_map) &&
(vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT)) {
arvif->ftm_responder = info->ftm_responder;
param = WMI_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
arvif->ftm_responder);
if (ret)
ath11k_warn(ar->ab, "Failed to set ftm responder %i: %d\n",
arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_FILS_DISCOVERY ||
changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP)
ath11k_mac_fils_discovery(arvif, info);
if (changed & BSS_CHANGED_ARP_FILTER) {
ipv4_cnt = min(vif->cfg.arp_addr_cnt, ATH11K_IPV4_MAX_COUNT);
memcpy(arvif->arp_ns_offload.ipv4_addr,
vif->cfg.arp_addr_list,
ipv4_cnt * sizeof(u32));
memcpy(arvif->arp_ns_offload.mac_addr, vif->addr, ETH_ALEN);
arvif->arp_ns_offload.ipv4_count = ipv4_cnt;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "arp_addr_cnt %d vif->addr %pM, offload_addr %pI4\n",
vif->cfg.arp_addr_cnt,
vif->addr, arvif->arp_ns_offload.ipv4_addr);
}
mutex_unlock(&ar->conf_mutex);
}
void __ath11k_mac_scan_finish(struct ath11k *ar)
{
lockdep_assert_held(&ar->data_lock);
switch (ar->scan.state) {
case ATH11K_SCAN_IDLE:
break;
case ATH11K_SCAN_RUNNING:
case ATH11K_SCAN_ABORTING:
if (ar->scan.is_roc && ar->scan.roc_notify)
ieee80211_remain_on_channel_expired(ar->hw);
fallthrough;
case ATH11K_SCAN_STARTING:
if (!ar->scan.is_roc) {
struct cfg80211_scan_info info = {
.aborted = ((ar->scan.state ==
ATH11K_SCAN_ABORTING) ||
(ar->scan.state ==
ATH11K_SCAN_STARTING)),
};
ieee80211_scan_completed(ar->hw, &info);
}
ar->scan.state = ATH11K_SCAN_IDLE;
ar->scan_channel = NULL;
ar->scan.roc_freq = 0;
cancel_delayed_work(&ar->scan.timeout);
complete_all(&ar->scan.completed);
break;
}
}
void ath11k_mac_scan_finish(struct ath11k *ar)
{
spin_lock_bh(&ar->data_lock);
__ath11k_mac_scan_finish(ar);
spin_unlock_bh(&ar->data_lock);
}
static int ath11k_scan_stop(struct ath11k *ar)
{
struct scan_cancel_param arg = {
.req_type = WLAN_SCAN_CANCEL_SINGLE,
.scan_id = ATH11K_SCAN_ID,
};
int ret;
lockdep_assert_held(&ar->conf_mutex);
/* TODO: Fill other STOP Params */
arg.pdev_id = ar->pdev->pdev_id;
ret = ath11k_wmi_send_scan_stop_cmd(ar, &arg);
if (ret) {
ath11k_warn(ar->ab, "failed to stop wmi scan: %d\n", ret);
goto out;
}
ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
if (ret == 0) {
ath11k_warn(ar->ab,
"failed to receive scan abort comple: timed out\n");
ret = -ETIMEDOUT;
} else if (ret > 0) {
ret = 0;
}
out:
/* Scan state should be updated upon scan completion but in case
 * firmware fails to deliver the event (for whatever reason) it is
 * desired to clean up scan state anyway. Firmware may have just
 * dropped the scan completion event delivery due to the transport
 * pipe overflowing with data and/or it can recover on its own before
 * the next scan request is submitted.
 */
spin_lock_bh(&ar->data_lock);
if (ar->scan.state != ATH11K_SCAN_IDLE)
__ath11k_mac_scan_finish(ar);
spin_unlock_bh(&ar->data_lock);
return ret;
}
static void ath11k_scan_abort(struct ath11k *ar)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
switch (ar->scan.state) {
case ATH11K_SCAN_IDLE:
/* This can happen if timeout worker kicked in and called
* abortion while scan completion was being processed.
*/
break;
case ATH11K_SCAN_STARTING:
case ATH11K_SCAN_ABORTING:
ath11k_warn(ar->ab, "refusing scan abortion due to invalid scan state: %d\n",
ar->scan.state);
break;
case ATH11K_SCAN_RUNNING:
ar->scan.state = ATH11K_SCAN_ABORTING;
spin_unlock_bh(&ar->data_lock);
ret = ath11k_scan_stop(ar);
if (ret)
ath11k_warn(ar->ab, "failed to abort scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
break;
}
spin_unlock_bh(&ar->data_lock);
}
static void ath11k_scan_timeout_work(struct work_struct *work)
{
struct ath11k *ar = container_of(work, struct ath11k,
scan.timeout.work);
mutex_lock(&ar->conf_mutex);
ath11k_scan_abort(ar);
mutex_unlock(&ar->conf_mutex);
}
static int ath11k_start_scan(struct ath11k *ar,
struct scan_req_params *arg)
{
int ret;
unsigned long timeout = 1 * HZ;
lockdep_assert_held(&ar->conf_mutex);
if (ath11k_spectral_get_mode(ar) == ATH11K_SPECTRAL_BACKGROUND)
ath11k_spectral_reset_buffer(ar);
ret = ath11k_wmi_send_scan_start_cmd(ar, arg);
if (ret)
return ret;
if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map)) {
timeout = 5 * HZ;
if (ar->supports_6ghz)
timeout += 5 * HZ;
}
ret = wait_for_completion_timeout(&ar->scan.started, timeout);
if (ret == 0) {
ret = ath11k_scan_stop(ar);
if (ret)
ath11k_warn(ar->ab, "failed to stop scan: %d\n", ret);
return -ETIMEDOUT;
}
/* If we failed to start the scan, return error code at
* this point. This is probably due to some issue in the
* firmware, but no need to wedge the driver due to that...
*/
spin_lock_bh(&ar->data_lock);
if (ar->scan.state == ATH11K_SCAN_IDLE) {
spin_unlock_bh(&ar->data_lock);
return -EINVAL;
}
spin_unlock_bh(&ar->data_lock);
return 0;
}
static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
{
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct cfg80211_scan_request *req = &hw_req->req;
struct scan_req_params *arg = NULL;
int ret = 0;
int i;
u32 scan_timeout;
/* Firmware that advertises support for triggering the 11D algorithm
 * on the results of a regular scan expects the driver to send
 * WMI_11D_SCAN_START_CMDID before WMI_START_SCAN_CMDID.
 * With this feature, a separate 11D scan can be avoided since the
 * regdomain can be determined from the results of the regular scan.
 */
if (ar->state_11d == ATH11K_11D_PREPARING &&
test_bit(WMI_TLV_SERVICE_SUPPORT_11D_FOR_HOST_SCAN,
ar->ab->wmi_ab.svc_map))
ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
switch (ar->scan.state) {
case ATH11K_SCAN_IDLE:
reinit_completion(&ar->scan.started);
reinit_completion(&ar->scan.completed);
ar->scan.state = ATH11K_SCAN_STARTING;
ar->scan.is_roc = false;
ar->scan.vdev_id = arvif->vdev_id;
ret = 0;
break;
case ATH11K_SCAN_STARTING:
case ATH11K_SCAN_RUNNING:
case ATH11K_SCAN_ABORTING:
ret = -EBUSY;
break;
}
spin_unlock_bh(&ar->data_lock);
if (ret)
goto exit;
arg = kzalloc(sizeof(*arg), GFP_KERNEL);
if (!arg) {
ret = -ENOMEM;
goto exit;
}
ath11k_wmi_start_scan_init(ar, arg);
arg->vdev_id = arvif->vdev_id;
arg->scan_id = ATH11K_SCAN_ID;
if (req->ie_len) {
arg->extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
if (!arg->extraie.ptr) {
ret = -ENOMEM;
goto exit;
}
arg->extraie.len = req->ie_len;
}
if (req->n_ssids) {
arg->num_ssids = req->n_ssids;
for (i = 0; i < arg->num_ssids; i++) {
arg->ssid[i].length = req->ssids[i].ssid_len;
memcpy(&arg->ssid[i].ssid, req->ssids[i].ssid,
req->ssids[i].ssid_len);
}
} else {
arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE;
}
if (req->n_channels) {
arg->num_chan = req->n_channels;
arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list),
GFP_KERNEL);
if (!arg->chan_list) {
ret = -ENOMEM;
goto exit;
}
for (i = 0; i < arg->num_chan; i++) {
if (test_bit(WMI_TLV_SERVICE_SCAN_CONFIG_PER_CHANNEL,
ar->ab->wmi_ab.svc_map)) {
arg->chan_list[i] =
u32_encode_bits(req->channels[i]->center_freq,
WMI_SCAN_CONFIG_PER_CHANNEL_MASK);
/* If NL80211_SCAN_FLAG_COLOCATED_6GHZ is set in scan
* flags, then scan all PSC channels in 6 GHz band and
* those non-PSC channels where RNR IE is found during
* the legacy 2.4/5 GHz scan.
* If NL80211_SCAN_FLAG_COLOCATED_6GHZ is not set,
* then all channels in 6 GHz will be scanned.
*/
if (req->channels[i]->band == NL80211_BAND_6GHZ &&
req->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ &&
!cfg80211_channel_is_psc(req->channels[i]))
arg->chan_list[i] |=
WMI_SCAN_CH_FLAG_SCAN_ONLY_IF_RNR_FOUND;
} else {
arg->chan_list[i] = req->channels[i]->center_freq;
}
}
}
if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
arg->scan_f_add_spoofed_mac_in_probe = 1;
ether_addr_copy(arg->mac_addr.addr, req->mac_addr);
ether_addr_copy(arg->mac_mask.addr, req->mac_addr_mask);
}
/* if duration is set, default dwell times will be overwritten */
if (req->duration) {
arg->dwell_time_active = req->duration;
arg->dwell_time_active_2g = req->duration;
arg->dwell_time_active_6g = req->duration;
arg->dwell_time_passive = req->duration;
arg->dwell_time_passive_6g = req->duration;
arg->burst_duration = req->duration;
scan_timeout = min_t(u32, arg->max_rest_time *
(arg->num_chan - 1) + (req->duration +
ATH11K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) *
arg->num_chan, arg->max_scan_time);
} else {
scan_timeout = arg->max_scan_time;
}
/* Add a margin to account for event/command processing */
scan_timeout += ATH11K_MAC_SCAN_CMD_EVT_OVERHEAD;
ret = ath11k_start_scan(ar, arg);
if (ret) {
ath11k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
ar->scan.state = ATH11K_SCAN_IDLE;
spin_unlock_bh(&ar->data_lock);
}
ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
msecs_to_jiffies(scan_timeout));
exit:
if (arg) {
kfree(arg->chan_list);
kfree(arg->extraie.ptr);
kfree(arg);
}
mutex_unlock(&ar->conf_mutex);
if (ar->state_11d == ATH11K_11D_PREPARING)
ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
return ret;
}
static void ath11k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath11k *ar = hw->priv;
mutex_lock(&ar->conf_mutex);
ath11k_scan_abort(ar);
mutex_unlock(&ar->conf_mutex);
cancel_delayed_work_sync(&ar->scan.timeout);
}
static int ath11k_install_key(struct ath11k_vif *arvif,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd,
const u8 *macaddr, u32 flags)
{
int ret;
struct ath11k *ar = arvif->ar;
struct wmi_vdev_install_key_arg arg = {
.vdev_id = arvif->vdev_id,
.key_idx = key->keyidx,
.key_len = key->keylen,
.key_data = key->key,
.key_flags = flags,
.macaddr = macaddr,
};
lockdep_assert_held(&arvif->ar->conf_mutex);
reinit_completion(&ar->install_key_done);
if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
return 0;
if (cmd == DISABLE_KEY) {
arg.key_cipher = WMI_CIPHER_NONE;
arg.key_data = NULL;
goto install;
}
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
arg.key_cipher = WMI_CIPHER_AES_CCM;
/* TODO: Re-check if flag is valid */
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
break;
case WLAN_CIPHER_SUITE_TKIP:
arg.key_cipher = WMI_CIPHER_TKIP;
arg.key_txmic_len = 8;
arg.key_rxmic_len = 8;
break;
case WLAN_CIPHER_SUITE_CCMP_256:
arg.key_cipher = WMI_CIPHER_AES_CCM;
break;
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
arg.key_cipher = WMI_CIPHER_AES_GCM;
break;
default:
ath11k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
return -EOPNOTSUPP;
}
if (test_bit(ATH11K_FLAG_RAW_MODE, &ar->ab->dev_flags))
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV |
IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
install:
ret = ath11k_wmi_vdev_install_key(arvif->ar, &arg);
if (ret)
return ret;
if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ))
return -ETIMEDOUT;
return ar->install_key_status ? -EINVAL : 0;
}
static int ath11k_clear_peer_keys(struct ath11k_vif *arvif,
const u8 *addr)
{
struct ath11k *ar = arvif->ar;
struct ath11k_base *ab = ar->ab;
struct ath11k_peer *peer;
int first_errno = 0;
int ret;
int i;
u32 flags = 0;
lockdep_assert_held(&ar->conf_mutex);
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, arvif->vdev_id, addr);
spin_unlock_bh(&ab->base_lock);
if (!peer)
return -ENOENT;
for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
if (!peer->keys[i])
continue;
/* key flags are not required to delete the key */
ret = ath11k_install_key(arvif, peer->keys[i],
DISABLE_KEY, addr, flags);
if (ret < 0 && first_errno == 0)
first_errno = ret;
if (ret < 0)
ath11k_warn(ab, "failed to remove peer key %d: %d\n",
i, ret);
spin_lock_bh(&ab->base_lock);
peer->keys[i] = NULL;
spin_unlock_bh(&ab->base_lock);
}
return first_errno;
}
static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_peer *peer;
struct ath11k_sta *arsta;
const u8 *peer_addr;
int ret = 0;
u32 flags = 0;
/* BIP needs to be done in software */
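/* A return value of 1 tells mac80211 to fall back to software
 * encryption for this key.
 */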
if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
return 1;
if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
return 1;
if (key->keyidx > WMI_MAX_KEY_INDEX)
return -ENOSPC;
mutex_lock(&ar->conf_mutex);
if (sta)
peer_addr = sta->addr;
else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
peer_addr = vif->bss_conf.bssid;
else
peer_addr = vif->addr;
key->hw_key_idx = key->keyidx;
/* The peer should not disappear midway (unless FW goes awry) since
 * we already hold conf_mutex. We just make sure it's there now.
 */
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
/* flush the fragments cache during key (re)install to
* ensure all frags in the new frag list belong to the same key.
*/
if (peer && sta && cmd == SET_KEY)
ath11k_peer_frags_flush(ar, peer);
spin_unlock_bh(&ab->base_lock);
if (!peer) {
if (cmd == SET_KEY) {
ath11k_warn(ab, "cannot install key for non-existent peer %pM\n",
peer_addr);
ret = -EOPNOTSUPP;
goto exit;
} else {
/* if the peer doesn't exist there is no key to disable
* anymore
*/
goto exit;
}
}
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
flags |= WMI_KEY_PAIRWISE;
else
flags |= WMI_KEY_GROUP;
ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags);
if (ret) {
ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret);
goto exit;
}
ret = ath11k_dp_peer_rx_pn_replay_config(arvif, peer_addr, cmd, key);
if (ret) {
ath11k_warn(ab, "failed to offload PN replay detection %d\n", ret);
goto exit;
}
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
if (peer && cmd == SET_KEY) {
peer->keys[key->keyidx] = key;
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
peer->ucast_keyidx = key->keyidx;
peer->sec_type = ath11k_dp_tx_get_encrypt_type(key->cipher);
} else {
peer->mcast_keyidx = key->keyidx;
peer->sec_type_grp = ath11k_dp_tx_get_encrypt_type(key->cipher);
}
} else if (peer && cmd == DISABLE_KEY) {
peer->keys[key->keyidx] = NULL;
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
peer->ucast_keyidx = 0;
else
peer->mcast_keyidx = 0;
} else if (!peer)
/* impossible unless FW goes crazy */
ath11k_warn(ab, "peer %pM disappeared!\n", peer_addr);
if (sta) {
arsta = (struct ath11k_sta *)sta->drv_priv;
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
if (cmd == SET_KEY)
arsta->pn_type = HAL_PN_TYPE_WPA;
else
arsta->pn_type = HAL_PN_TYPE_NONE;
break;
default:
arsta->pn_type = HAL_PN_TYPE_NONE;
break;
}
}
spin_unlock_bh(&ab->base_lock);
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static int
ath11k_mac_bitrate_mask_num_ht_rates(struct ath11k *ar,
enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask)
{
int num_rates = 0;
int i;
for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
num_rates += hweight8(mask->control[band].ht_mcs[i]);
return num_rates;
}
static int
ath11k_mac_bitrate_mask_num_vht_rates(struct ath11k *ar,
enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask)
{
int num_rates = 0;
int i;
for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
num_rates += hweight16(mask->control[band].vht_mcs[i]);
return num_rates;
}
static int
ath11k_mac_bitrate_mask_num_he_rates(struct ath11k *ar,
enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask)
{
int num_rates = 0;
int i;
for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++)
num_rates += hweight16(mask->control[band].he_mcs[i]);
return num_rates;
}
static int
ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif,
struct ieee80211_sta *sta,
const struct cfg80211_bitrate_mask *mask,
enum nl80211_band band)
{
struct ath11k *ar = arvif->ar;
u8 vht_rate, nss;
u32 rate_code;
int ret, i;
lockdep_assert_held(&ar->conf_mutex);
nss = 0;
for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
nss = i + 1;
vht_rate = ffs(mask->control[band].vht_mcs[i]) - 1;
}
}
if (!nss) {
ath11k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM",
sta->addr);
return -EINVAL;
}
/* Avoid updating invalid nss as fixed rate */
if (nss > sta->deflink.rx_nss)
return -EINVAL;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"Setting Fixed VHT Rate for peer %pM. Device will not switch to any other selected rates",
sta->addr);
rate_code = ATH11K_HW_RATE_CODE(vht_rate, nss - 1,
WMI_RATE_PREAMBLE_VHT);
ret = ath11k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id,
WMI_PEER_PARAM_FIXED_RATE,
rate_code);
if (ret)
ath11k_warn(ar->ab,
"failed to update STA %pM Fixed Rate %d: %d\n",
sta->addr, rate_code, ret);
return ret;
}
static int
ath11k_mac_set_peer_he_fixed_rate(struct ath11k_vif *arvif,
struct ieee80211_sta *sta,
const struct cfg80211_bitrate_mask *mask,
enum nl80211_band band)
{
struct ath11k *ar = arvif->ar;
u8 he_rate, nss;
u32 rate_code;
int ret, i;
lockdep_assert_held(&ar->conf_mutex);
nss = 0;
for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) {
if (hweight16(mask->control[band].he_mcs[i]) == 1) {
nss = i + 1;
he_rate = ffs(mask->control[band].he_mcs[i]) - 1;
}
}
if (!nss) {
ath11k_warn(ar->ab, "No single he fixed rate found to set for %pM",
sta->addr);
return -EINVAL;
}
/* Avoid updating invalid nss as fixed rate */
if (nss > sta->deflink.rx_nss)
return -EINVAL;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"setting fixed he rate for peer %pM, device will not switch to any other selected rates",
sta->addr);
rate_code = ATH11K_HW_RATE_CODE(he_rate, nss - 1,
WMI_RATE_PREAMBLE_HE);
ret = ath11k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id,
WMI_PEER_PARAM_FIXED_RATE,
rate_code);
if (ret)
ath11k_warn(ar->ab,
"failed to update sta %pM fixed rate %d: %d\n",
sta->addr, rate_code, ret);
return ret;
}
static int
ath11k_mac_set_peer_ht_fixed_rate(struct ath11k_vif *arvif,
struct ieee80211_sta *sta,
const struct cfg80211_bitrate_mask *mask,
enum nl80211_band band)
{
struct ath11k *ar = arvif->ar;
u8 ht_rate, nss = 0;
u32 rate_code;
int ret, i;
lockdep_assert_held(&ar->conf_mutex);
for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
nss = i + 1;
ht_rate = ffs(mask->control[band].ht_mcs[i]) - 1;
}
}
if (!nss) {
ath11k_warn(ar->ab, "No single HT Fixed rate found to set for %pM",
sta->addr);
return -EINVAL;
}
/* Avoid updating invalid nss as fixed rate */
if (nss > sta->deflink.rx_nss)
return -EINVAL;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"Setting Fixed HT Rate for peer %pM. Device will not switch to any other selected rates",
sta->addr);
rate_code = ATH11K_HW_RATE_CODE(ht_rate, nss - 1,
WMI_RATE_PREAMBLE_HT);
ret = ath11k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id,
WMI_PEER_PARAM_FIXED_RATE,
rate_code);
if (ret)
ath11k_warn(ar->ab,
"failed to update STA %pM HT Fixed Rate %d: %d\n",
sta->addr, rate_code, ret);
return ret;
}
static int ath11k_station_assoc(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
bool reassoc)
{
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct peer_assoc_params peer_arg;
int ret = 0;
struct cfg80211_chan_def def;
enum nl80211_band band;
struct cfg80211_bitrate_mask *mask;
u8 num_ht_rates, num_vht_rates, num_he_rates;
lockdep_assert_held(&ar->conf_mutex);
if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
return -EPERM;
band = def.chan->band;
mask = &arvif->bitrate_mask;
ath11k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc);
peer_arg.is_assoc = true;
ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (ret) {
ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
sta->addr, arvif->vdev_id, ret);
return ret;
}
if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
sta->addr, arvif->vdev_id);
return -ETIMEDOUT;
}
num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask);
num_he_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask);
num_ht_rates = ath11k_mac_bitrate_mask_num_ht_rates(ar, band, mask);
/* If a single VHT/HE rate is configured (via set_bitrate_mask()),
* peer_assoc will disable VHT/HE. It is then re-enabled through a
* peer-specific fixed rate param.
* Note that all other rates and NSS will be disabled for this peer.
*/
if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
ret = ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
band);
if (ret)
return ret;
} else if (sta->deflink.he_cap.has_he && num_he_rates == 1) {
ret = ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask,
band);
if (ret)
return ret;
} else if (sta->deflink.ht_cap.ht_supported && num_ht_rates == 1) {
ret = ath11k_mac_set_peer_ht_fixed_rate(arvif, sta, mask,
band);
if (ret)
return ret;
}
/* Re-assoc is run only to update supported rates for given station. It
* doesn't make much sense to reconfigure the peer completely.
*/
if (reassoc)
return 0;
ret = ath11k_setup_peer_smps(ar, arvif, sta->addr,
&sta->deflink.ht_cap,
le16_to_cpu(sta->deflink.he_6ghz_capa.capa));
if (ret) {
ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
return ret;
}
if (!sta->wme) {
arvif->num_legacy_stations++;
ret = ath11k_recalc_rtscts_prot(arvif);
if (ret)
return ret;
}
if (sta->wme && sta->uapsd_queues) {
ret = ath11k_peer_assoc_qos_ap(ar, arvif, sta);
if (ret) {
ath11k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n",
sta->addr, arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
static int ath11k_station_disassoc(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
if (!sta->wme) {
arvif->num_legacy_stations--;
ret = ath11k_recalc_rtscts_prot(arvif);
if (ret)
return ret;
}
ret = ath11k_clear_peer_keys(arvif, sta->addr);
if (ret) {
ath11k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
return 0;
}
static void ath11k_sta_rc_update_wk(struct work_struct *wk)
{
struct ath11k *ar;
struct ath11k_vif *arvif;
struct ath11k_sta *arsta;
struct ieee80211_sta *sta;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
const u16 *he_mcs_mask;
u32 changed, bw, nss, smps, bw_prev;
int err, num_ht_rates, num_vht_rates, num_he_rates;
const struct cfg80211_bitrate_mask *mask;
struct peer_assoc_params peer_arg;
enum wmi_phy_mode peer_phymode;
arsta = container_of(wk, struct ath11k_sta, update_wk);
sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
arvif = arsta->arvif;
ar = arvif->ar;
if (WARN_ON(ath11k_mac_vif_chan(arvif->vif, &def)))
return;
band = def.chan->band;
ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
spin_lock_bh(&ar->data_lock);
changed = arsta->changed;
arsta->changed = 0;
bw = arsta->bw;
bw_prev = arsta->bw_prev;
nss = arsta->nss;
smps = arsta->smps;
spin_unlock_bh(&ar->data_lock);
mutex_lock(&ar->conf_mutex);
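/* Clamp nss to at least 1 and to the maximum allowed by the
* configured HT/VHT/HE MCS masks.
*/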
nss = max_t(u32, 1, nss);
nss = min(nss, max(max(ath11k_mac_max_ht_nss(ht_mcs_mask),
ath11k_mac_max_vht_nss(vht_mcs_mask)),
ath11k_mac_max_he_nss(he_mcs_mask)));
if (changed & IEEE80211_RC_BW_CHANGED) {
/* Get the peer phymode */
ath11k_peer_assoc_h_phymode(ar, arvif->vif, sta, &peer_arg);
peer_phymode = peer_arg.peer_phymode;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "update sta %pM peer bw %d phymode %d\n",
sta->addr, bw, peer_phymode);
if (bw > bw_prev) {
/* BW is upgraded. In this case we send WMI_PEER_PHYMODE
* followed by WMI_PEER_CHWIDTH
*/
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "BW upgrade for sta %pM new BW %d, old BW %d\n",
sta->addr, bw, bw_prev);
err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
WMI_PEER_PHYMODE, peer_phymode);
if (err) {
ath11k_warn(ar->ab, "failed to update STA %pM peer phymode %d: %d\n",
sta->addr, peer_phymode, err);
goto err_rc_bw_changed;
}
err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
WMI_PEER_CHWIDTH, bw);
if (err)
ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
sta->addr, bw, err);
} else {
/* BW is downgraded. In this case we send WMI_PEER_CHWIDTH
* followed by WMI_PEER_PHYMODE
*/
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "BW downgrade for sta %pM new BW %d,old BW %d\n",
sta->addr, bw, bw_prev);
err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
WMI_PEER_CHWIDTH, bw);
if (err) {
ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
sta->addr, bw, err);
goto err_rc_bw_changed;
}
err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
WMI_PEER_PHYMODE, peer_phymode);
if (err)
ath11k_warn(ar->ab, "failed to update STA %pM peer phymode %d: %d\n",
sta->addr, peer_phymode, err);
}
}
if (changed & IEEE80211_RC_NSS_CHANGED) {
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "update sta %pM nss %d\n",
sta->addr, nss);
err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
WMI_PEER_NSS, nss);
if (err)
ath11k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n",
sta->addr, nss, err);
}
if (changed & IEEE80211_RC_SMPS_CHANGED) {
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "update sta %pM smps %d\n",
sta->addr, smps);
err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
WMI_PEER_MIMO_PS_STATE, smps);
if (err)
ath11k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n",
sta->addr, smps, err);
}
if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
mask = &arvif->bitrate_mask;
num_ht_rates = ath11k_mac_bitrate_mask_num_ht_rates(ar, band,
mask);
num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band,
mask);
num_he_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band,
mask);
/* ath11k_peer_assoc_prepare() will reject VHT rates in the
* bitrate_mask if they are not in range format and will mark
* the VHT tx_rateset as unsupported, so setting multiple VHT
* MCSes (e.g. MCS 4, 5, 6) per peer is not supported here.
* A single rate in the VHT mask, however, can be set as the
* per-peer fixed rate. Note that even if HT rates are
* configured in the bitrate mask, the device will not switch
* to those rates while a per-peer fixed rate is set.
* TODO: Check RATEMASK_CMDID to support auto rate selection
* across HT/VHT and for multiple VHT MCS support.
*/
if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
band);
} else if (sta->deflink.he_cap.has_he && num_he_rates == 1) {
ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask,
band);
} else if (sta->deflink.ht_cap.ht_supported && num_ht_rates == 1) {
ath11k_mac_set_peer_ht_fixed_rate(arvif, sta, mask,
band);
} else {
/* If the peer is non-VHT/HE or no fixed VHT/HE rate is
* provided in the new bitrate mask, set the other rates using
* the peer_assoc command. Also clear the peer fixed rate
* settings, as they have higher priority than peer_assoc.
*/
err = ath11k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id,
WMI_PEER_PARAM_FIXED_RATE,
WMI_FIXED_RATE_NONE);
if (err)
ath11k_warn(ar->ab,
"failed to disable peer fixed rate for sta %pM: %d\n",
sta->addr, err);
ath11k_peer_assoc_prepare(ar, arvif->vif, sta,
&peer_arg, true);
peer_arg.is_assoc = false;
err = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (err)
ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
sta->addr, arvif->vdev_id, err);
if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ))
ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
sta->addr, arvif->vdev_id);
}
}
err_rc_bw_changed:
mutex_unlock(&ar->conf_mutex);
}
static void ath11k_sta_set_4addr_wk(struct work_struct *wk)
{
struct ath11k *ar;
struct ath11k_vif *arvif;
struct ath11k_sta *arsta;
struct ieee80211_sta *sta;
int ret = 0;
arsta = container_of(wk, struct ath11k_sta, set_4addr_wk);
sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
arvif = arsta->arvif;
ar = arvif->ar;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"setting USE_4ADDR for peer %pM\n", sta->addr);
ret = ath11k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id,
WMI_PEER_USE_4ADDR, 1);
if (ret)
ath11k_warn(ar->ab, "failed to set peer %pM 4addr capability: %d\n",
sta->addr, ret);
}
static int ath11k_mac_inc_num_stations(struct ath11k_vif *arvif,
struct ieee80211_sta *sta)
{
struct ath11k *ar = arvif->ar;
lockdep_assert_held(&ar->conf_mutex);
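/* Peers created on a STA vdev (i.e. the AP we are connecting to) do not
* count against the station limit; only TDLS peers on a STA vdev do.
*/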
if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
return 0;
if (ar->num_stations >= ar->max_num_stations)
return -ENOBUFS;
ar->num_stations++;
return 0;
}
static void ath11k_mac_dec_num_stations(struct ath11k_vif *arvif,
struct ieee80211_sta *sta)
{
struct ath11k *ar = arvif->ar;
lockdep_assert_held(&ar->conf_mutex);
if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
return;
ar->num_stations--;
}
static int ath11k_mac_station_add(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct peer_create_params peer_param;
int ret;
lockdep_assert_held(&ar->conf_mutex);
ret = ath11k_mac_inc_num_stations(arvif, sta);
if (ret) {
ath11k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
ar->max_num_stations);
goto exit;
}
arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
if (!arsta->rx_stats) {
ret = -ENOMEM;
goto dec_num_station;
}
peer_param.vdev_id = arvif->vdev_id;
peer_param.peer_addr = sta->addr;
peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
ret = ath11k_peer_create(ar, arvif, sta, &peer_param);
if (ret) {
ath11k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
goto free_rx_stats;
}
ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL);
if (!arsta->tx_stats) {
ret = -ENOMEM;
goto free_peer;
}
}
if (ieee80211_vif_is_mesh(vif)) {
ath11k_dbg(ab, ATH11K_DBG_MAC,
"setting USE_4ADDR for mesh STA %pM\n", sta->addr);
ret = ath11k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id,
WMI_PEER_USE_4ADDR, 1);
if (ret) {
ath11k_warn(ab, "failed to set mesh STA %pM 4addr capability: %d\n",
sta->addr, ret);
goto free_tx_stats;
}
}
ret = ath11k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
if (ret) {
ath11k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
sta->addr, arvif->vdev_id, ret);
goto free_tx_stats;
}
if (ab->hw_params.vdev_start_delay &&
!arvif->is_started &&
arvif->vdev_type != WMI_VDEV_TYPE_AP) {
ret = ath11k_start_vdev_delay(ar->hw, vif);
if (ret) {
ath11k_warn(ab, "failed to delay vdev start: %d\n", ret);
goto free_tx_stats;
}
}
ewma_avg_rssi_init(&arsta->avg_rssi);
return 0;
free_tx_stats:
kfree(arsta->tx_stats);
arsta->tx_stats = NULL;
free_peer:
ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
free_rx_stats:
kfree(arsta->rx_stats);
arsta->rx_stats = NULL;
dec_num_station:
ath11k_mac_dec_num_stations(arvif, sta);
exit:
return ret;
}
static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar,
struct ieee80211_sta *sta)
{
u32 bw = WMI_PEER_CHWIDTH_20MHZ;
switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_20:
bw = WMI_PEER_CHWIDTH_20MHZ;
break;
case IEEE80211_STA_RX_BW_40:
bw = WMI_PEER_CHWIDTH_40MHZ;
break;
case IEEE80211_STA_RX_BW_80:
bw = WMI_PEER_CHWIDTH_80MHZ;
break;
case IEEE80211_STA_RX_BW_160:
bw = WMI_PEER_CHWIDTH_160MHZ;
break;
default:
ath11k_warn(ar->ab, "Invalid bandwidth %d for %pM\n",
sta->deflink.bandwidth, sta->addr);
bw = WMI_PEER_CHWIDTH_20MHZ;
break;
}
return bw;
}
static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state)
{
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k_peer *peer;
int ret = 0;
/* cancel must be done outside the mutex to avoid deadlock */
if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
cancel_work_sync(&arsta->update_wk);
cancel_work_sync(&arsta->set_4addr_wk);
}
mutex_lock(&ar->conf_mutex);
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
memset(arsta, 0, sizeof(*arsta));
arsta->arvif = arvif;
arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk);
INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk);
ret = ath11k_mac_station_add(ar, vif, sta);
if (ret)
ath11k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
bool skip_peer_delete = ar->ab->hw_params.vdev_start_delay &&
vif->type == NL80211_IFTYPE_STATION;
ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
if (!skip_peer_delete) {
ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath11k_warn(ar->ab,
"Failed to delete peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
else
ath11k_dbg(ar->ab,
ATH11K_DBG_MAC,
"Removed peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
}
ath11k_mac_dec_num_stations(arvif, sta);
mutex_lock(&ar->ab->tbl_mtx_lock);
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
if (skip_peer_delete && peer) {
peer->sta = NULL;
} else if (peer && peer->sta == sta) {
ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
vif->addr, arvif->vdev_id);
ath11k_peer_rhash_delete(ar->ab, peer);
peer->sta = NULL;
list_del(&peer->list);
kfree(peer);
ar->num_peers--;
}
spin_unlock_bh(&ar->ab->base_lock);
mutex_unlock(&ar->ab->tbl_mtx_lock);
kfree(arsta->tx_stats);
arsta->tx_stats = NULL;
kfree(arsta->rx_stats);
arsta->rx_stats = NULL;
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC &&
(vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
vif->type == NL80211_IFTYPE_ADHOC)) {
ret = ath11k_station_assoc(ar, vif, sta, false);
if (ret)
ath11k_warn(ar->ab, "Failed to associate station: %pM\n",
sta->addr);
spin_lock_bh(&ar->data_lock);
/* Set arsta bw and prev bw */
arsta->bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
arsta->bw_prev = arsta->bw;
spin_unlock_bh(&ar->data_lock);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
if (peer)
peer->is_authorized = true;
spin_unlock_bh(&ar->ab->base_lock);
if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
ret = ath11k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id,
WMI_PEER_AUTHORIZE,
1);
if (ret)
ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
sta->addr, arvif->vdev_id, ret);
}
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
if (peer)
peer->is_authorized = false;
spin_unlock_bh(&ar->ab->base_lock);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
(vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
vif->type == NL80211_IFTYPE_ADHOC)) {
ret = ath11k_station_disassoc(ar, vif, sta);
if (ret)
ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n",
sta->addr);
}
mutex_unlock(&ar->conf_mutex);
return ret;
}
static int ath11k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
int ret = 0;
s16 txpwr;
if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
txpwr = 0;
} else {
txpwr = sta->deflink.txpwr.power;
if (!txpwr)
return -EINVAL;
}
if (txpwr > ATH11K_TX_POWER_MAX_VAL || txpwr < ATH11K_TX_POWER_MIN_VAL)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
WMI_PEER_USE_FIXED_PWR, txpwr);
if (ret) {
ath11k_warn(ar->ab, "failed to set tx power for station ret: %d\n",
ret);
goto out;
}
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static void ath11k_mac_op_sta_set_4addr(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enabled)
{
struct ath11k *ar = hw->priv;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
if (enabled && !arsta->use_4addr_set) {
ieee80211_queue_work(ar->hw, &arsta->set_4addr_wk);
arsta->use_4addr_set = true;
}
}
static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
u32 changed)
{
struct ath11k *ar = hw->priv;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_peer *peer;
u32 bw, smps;
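/* This callback is not allowed to sleep (it may be invoked under RCU),
* so only snapshot the changes under data_lock here; the WMI updates
* are issued later from ath11k_sta_rc_update_wk().
*/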
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
if (!peer) {
spin_unlock_bh(&ar->ab->base_lock);
ath11k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n",
sta->addr, arvif->vdev_id);
return;
}
spin_unlock_bh(&ar->ab->base_lock);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
sta->addr, changed, sta->deflink.bandwidth,
sta->deflink.rx_nss,
sta->deflink.smps_mode);
spin_lock_bh(&ar->data_lock);
if (changed & IEEE80211_RC_BW_CHANGED) {
bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
arsta->bw_prev = arsta->bw;
arsta->bw = bw;
}
if (changed & IEEE80211_RC_NSS_CHANGED)
arsta->nss = sta->deflink.rx_nss;
if (changed & IEEE80211_RC_SMPS_CHANGED) {
smps = WMI_PEER_SMPS_PS_NONE;
switch (sta->deflink.smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_OFF:
smps = WMI_PEER_SMPS_PS_NONE;
break;
case IEEE80211_SMPS_STATIC:
smps = WMI_PEER_SMPS_STATIC;
break;
case IEEE80211_SMPS_DYNAMIC:
smps = WMI_PEER_SMPS_DYNAMIC;
break;
default:
ath11k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n",
sta->deflink.smps_mode, sta->addr);
smps = WMI_PEER_SMPS_PS_NONE;
break;
}
arsta->smps = smps;
}
arsta->changed |= changed;
spin_unlock_bh(&ar->data_lock);
ieee80211_queue_work(hw, &arsta->update_wk);
}
static int ath11k_conf_tx_uapsd(struct ath11k *ar, struct ieee80211_vif *vif,
u16 ac, bool enable)
{
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
u32 value = 0;
int ret = 0;
if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
return 0;
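/* mac80211 and WMI number the ACs in opposite order: IEEE80211_AC_VO (0)
* maps to the WMI AC3 delivery/trigger bits, and so on down to
* IEEE80211_AC_BK (3) mapping to AC0.
*/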
switch (ac) {
case IEEE80211_AC_VO:
value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
break;
case IEEE80211_AC_VI:
value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
break;
case IEEE80211_AC_BE:
value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
break;
case IEEE80211_AC_BK:
value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
break;
}
if (enable)
arvif->u.sta.uapsd |= value;
else
arvif->u.sta.uapsd &= ~value;
ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
WMI_STA_PS_PARAM_UAPSD,
arvif->u.sta.uapsd);
if (ret) {
ath11k_warn(ar->ab, "could not set uapsd params %d\n", ret);
goto exit;
}
if (arvif->u.sta.uapsd)
value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
else
value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
WMI_STA_PS_PARAM_RX_WAKE_POLICY,
value);
if (ret)
ath11k_warn(ar->ab, "could not set rx wake param %d\n", ret);
exit:
return ret;
}
static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
unsigned int link_id, u16 ac,
const struct ieee80211_tx_queue_params *params)
{
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct wmi_wmm_params_arg *p = NULL;
int ret;
mutex_lock(&ar->conf_mutex);
switch (ac) {
case IEEE80211_AC_VO:
p = &arvif->wmm_params.ac_vo;
break;
case IEEE80211_AC_VI:
p = &arvif->wmm_params.ac_vi;
break;
case IEEE80211_AC_BE:
p = &arvif->wmm_params.ac_be;
break;
case IEEE80211_AC_BK:
p = &arvif->wmm_params.ac_bk;
break;
}
if (WARN_ON(!p)) {
ret = -EINVAL;
goto exit;
}
p->cwmin = params->cw_min;
p->cwmax = params->cw_max;
p->aifs = params->aifs;
p->txop = params->txop;
ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id,
&arvif->wmm_params);
if (ret) {
ath11k_warn(ar->ab, "failed to set wmm params: %d\n", ret);
goto exit;
}
ret = ath11k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
if (ret)
ath11k_warn(ar->ab, "failed to set sta uapsd: %d\n", ret);
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static struct ieee80211_sta_ht_cap
ath11k_create_ht_cap(struct ath11k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask)
{
int i;
struct ieee80211_sta_ht_cap ht_cap = {0};
u32 ar_vht_cap = ar->pdev->cap.vht_cap;
if (!(ar_ht_cap & WMI_HT_CAP_ENABLED))
return ht_cap;
ht_cap.ht_supported = 1;
ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;
if (ar_ht_cap & WMI_HT_CAP_HT20_SGI)
ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
if (ar_ht_cap & WMI_HT_CAP_HT40_SGI)
ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
if (ar_ht_cap & WMI_HT_CAP_DYNAMIC_SMPS) {
u32 smps;
smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
ht_cap.cap |= smps;
}
if (ar_ht_cap & WMI_HT_CAP_TX_STBC)
ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
if (ar_ht_cap & WMI_HT_CAP_RX_STBC) {
u32 stbc;
stbc = ar_ht_cap;
stbc &= WMI_HT_CAP_RX_STBC;
stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
stbc &= IEEE80211_HT_CAP_RX_STBC;
ht_cap.cap |= stbc;
}
if (ar_ht_cap & WMI_HT_CAP_RX_LDPC)
ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
if (ar_ht_cap & WMI_HT_CAP_L_SIG_TXOP_PROT)
ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
if (ar_vht_cap & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
for (i = 0; i < ar->num_rx_chains; i++) {
if (rate_cap_rx_chainmask & BIT(i))
ht_cap.mcs.rx_mask[i] = 0xFF;
}
ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
return ht_cap;
}
static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif)
{
u32 value = 0;
struct ath11k *ar = arvif->ar;
int nsts;
int sound_dim;
u32 vht_cap = ar->pdev->cap.vht_cap;
u32 vdev_param = WMI_VDEV_PARAM_TXBF;
if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) {
nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
if (nsts > (ar->num_rx_chains - 1))
nsts = ar->num_rx_chains - 1;
value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
}
if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
sound_dim = vht_cap &
IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
if (sound_dim > (ar->num_tx_chains - 1))
sound_dim = ar->num_tx_chains - 1;
value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
}
if (!value)
return 0;
if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) {
value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) &&
arvif->vdev_type == WMI_VDEV_TYPE_AP)
value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
}
/* TODO: SUBFEE not validated in HK, disable here until validated? */
if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) {
value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
arvif->vdev_type == WMI_VDEV_TYPE_STA)
value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
}
return ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
vdev_param, value);
}
static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
{
bool subfer, subfee;
int sound_dim = 0, nsts = 0;
subfer = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE));
subfee = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
if (ar->num_tx_chains < 2) {
*vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
subfer = false;
}
if (ar->num_rx_chains < 2) {
*vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
subfee = false;
}
/* If SU Beamformer is not set, then disable MU Beamformer Capability */
if (!subfer)
*vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
/* If SU Beamformee is not set, then disable MU Beamformee Capability */
if (!subfee)
*vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
sound_dim = (*vht_cap & IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
*vht_cap &= ~IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
nsts = (*vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK);
nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
*vht_cap &= ~IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
/* Enable Sounding Dimension Field only if SU BF is enabled */
if (subfer) {
if (sound_dim > (ar->num_tx_chains - 1))
sound_dim = ar->num_tx_chains - 1;
sound_dim <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
*vht_cap |= sound_dim;
}
/* Enable Beamformee STS Field only if SU BF is enabled */
if (subfee) {
if (nsts > (ar->num_rx_chains - 1))
nsts = ar->num_rx_chains - 1;
nsts <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
*vht_cap |= nsts;
}
}
static struct ieee80211_sta_vht_cap
ath11k_create_vht_cap(struct ath11k *ar, u32 rate_cap_tx_chainmask,
u32 rate_cap_rx_chainmask)
{
struct ieee80211_sta_vht_cap vht_cap = {0};
u16 txmcs_map, rxmcs_map;
int i;
vht_cap.vht_supported = 1;
vht_cap.cap = ar->pdev->cap.vht_cap;
if (ar->pdev->cap.nss_ratio_enabled)
vht_cap.vht_mcs.tx_highest |=
cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
ath11k_set_vht_txbf_cap(ar, &vht_cap.cap);
rxmcs_map = 0;
txmcs_map = 0;
for (i = 0; i < 8; i++) {
if (i < ar->num_tx_chains && rate_cap_tx_chainmask & BIT(i))
txmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
else
txmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
if (i < ar->num_rx_chains && rate_cap_rx_chainmask & BIT(i))
rxmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
else
rxmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
}
if (rate_cap_tx_chainmask <= 1)
vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_map);
vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_map);
return vht_cap;
}
static void ath11k_mac_setup_ht_vht_cap(struct ath11k *ar,
struct ath11k_pdev_cap *cap,
u32 *ht_cap_info)
{
struct ieee80211_supported_band *band;
u32 rate_cap_tx_chainmask;
u32 rate_cap_rx_chainmask;
u32 ht_cap;
rate_cap_tx_chainmask = ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift;
rate_cap_rx_chainmask = ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift;
if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
band = &ar->mac.sbands[NL80211_BAND_2GHZ];
ht_cap = cap->band[NL80211_BAND_2GHZ].ht_cap_info;
if (ht_cap_info)
*ht_cap_info = ht_cap;
band->ht_cap = ath11k_create_ht_cap(ar, ht_cap,
rate_cap_rx_chainmask);
}
if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
(ar->ab->hw_params.single_pdev_only ||
!ar->supports_6ghz)) {
band = &ar->mac.sbands[NL80211_BAND_5GHZ];
ht_cap = cap->band[NL80211_BAND_5GHZ].ht_cap_info;
if (ht_cap_info)
*ht_cap_info = ht_cap;
band->ht_cap = ath11k_create_ht_cap(ar, ht_cap,
rate_cap_rx_chainmask);
band->vht_cap = ath11k_create_vht_cap(ar, rate_cap_tx_chainmask,
rate_cap_rx_chainmask);
}
}
static int ath11k_check_chain_mask(struct ath11k *ar, u32 ant, bool is_tx_ant)
{
/* TODO: Check the requested chainmask against the supported
* chainmask table which is advertised in the extended_service_ready event
*/
return 0;
}
static void ath11k_gen_ppe_thresh(struct ath11k_ppe_threshold *fw_ppet,
u8 *he_ppet)
{
int nss, ru;
u8 bit = 7;
he_ppet[0] = fw_ppet->numss_m1 & IEEE80211_PPE_THRES_NSS_MASK;
he_ppet[0] |= (fw_ppet->ru_bit_mask <<
IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS) &
IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK;
for (nss = 0; nss <= fw_ppet->numss_m1; nss++) {
for (ru = 0; ru < 4; ru++) {
u8 val;
int i;
if ((fw_ppet->ru_bit_mask & BIT(ru)) == 0)
continue;
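/* Each (nss, ru) pair occupies 6 bits in the firmware table; swap its
* two 3-bit halves into the order expected by the HE PPE Thresholds
* field, then emit the bits MSB first into the byte stream.
*/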
val = (fw_ppet->ppet16_ppet8_ru3_ru0[nss] >> (ru * 6)) &
0x3f;
val = ((val >> 3) & 0x7) | ((val & 0x7) << 3);
for (i = 5; i >= 0; i--) {
he_ppet[bit / 8] |=
((val >> i) & 0x1) << ((bit % 8));
bit++;
}
}
}
}
static void
ath11k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem)
{
u8 m;
m = IEEE80211_HE_MAC_CAP0_TWT_RES |
IEEE80211_HE_MAC_CAP0_TWT_REQ;
he_cap_elem->mac_cap_info[0] &= ~m;
m = IEEE80211_HE_MAC_CAP2_TRS |
IEEE80211_HE_MAC_CAP2_BCAST_TWT |
IEEE80211_HE_MAC_CAP2_MU_CASCADING;
he_cap_elem->mac_cap_info[2] &= ~m;
m = IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED |
IEEE80211_HE_MAC_CAP2_BCAST_TWT |
IEEE80211_HE_MAC_CAP2_MU_CASCADING;
he_cap_elem->mac_cap_info[3] &= ~m;
m = IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG |
IEEE80211_HE_MAC_CAP4_BQR;
he_cap_elem->mac_cap_info[4] &= ~m;
m = IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION |
IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU |
IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING |
IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
he_cap_elem->mac_cap_info[5] &= ~m;
m = IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
he_cap_elem->phy_cap_info[2] &= ~m;
m = IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU |
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK;
he_cap_elem->phy_cap_info[3] &= ~m;
m = IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
he_cap_elem->phy_cap_info[4] &= ~m;
m = IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
he_cap_elem->phy_cap_info[5] &= ~m;
m = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB |
IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO;
he_cap_elem->phy_cap_info[6] &= ~m;
m = IEEE80211_HE_PHY_CAP7_PSR_BASED_SR |
IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
he_cap_elem->phy_cap_info[7] &= ~m;
m = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
he_cap_elem->phy_cap_info[8] &= ~m;
m = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
he_cap_elem->phy_cap_info[9] &= ~m;
}
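/* Build the HE 6 GHz band capabilities from the pdev HT/VHT caps: the
* 6 GHz band carries SMPS, A-MPDU and max MPDU length limits in this
* element instead of in HT/VHT IEs.
*/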
static __le16 ath11k_mac_setup_he_6ghz_cap(struct ath11k_pdev_cap *pcap,
struct ath11k_band_cap *bcap)
{
u8 val;
bcap->he_6ghz_capa = IEEE80211_HT_MPDU_DENSITY_NONE;
if (bcap->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
bcap->he_6ghz_capa |=
FIELD_PREP(IEEE80211_HE_6GHZ_CAP_SM_PS,
WLAN_HT_CAP_SM_PS_DYNAMIC);
else
bcap->he_6ghz_capa |=
FIELD_PREP(IEEE80211_HE_6GHZ_CAP_SM_PS,
WLAN_HT_CAP_SM_PS_DISABLED);
val = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
pcap->vht_cap);
bcap->he_6ghz_capa |=
FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP, val);
val = FIELD_GET(IEEE80211_VHT_CAP_MAX_MPDU_MASK, pcap->vht_cap);
bcap->he_6ghz_capa |=
FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN, val);
if (pcap->vht_cap & IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN)
bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
if (pcap->vht_cap & IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN)
bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS;
return cpu_to_le16(bcap->he_6ghz_capa);
}
static void ath11k_mac_set_hemcsmap(struct ath11k *ar,
struct ath11k_pdev_cap *cap,
struct ieee80211_sta_he_cap *he_cap,
int band)
{
u16 txmcs_map, rxmcs_map;
u32 i;
rxmcs_map = 0;
txmcs_map = 0;
for (i = 0; i < 8; i++) {
if (i < ar->num_tx_chains &&
(ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift) & BIT(i))
txmcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
else
txmcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
if (i < ar->num_rx_chains &&
(ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift) & BIT(i))
rxmcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
else
rxmcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
}
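/* The same per-chain MCS map is advertised for the 80, 160 and
* 80+80 MHz fields below.
*/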
he_cap->he_mcs_nss_supp.rx_mcs_80 =
cpu_to_le16(rxmcs_map & 0xffff);
he_cap->he_mcs_nss_supp.tx_mcs_80 =
cpu_to_le16(txmcs_map & 0xffff);
he_cap->he_mcs_nss_supp.rx_mcs_160 =
cpu_to_le16(rxmcs_map & 0xffff);
he_cap->he_mcs_nss_supp.tx_mcs_160 =
cpu_to_le16(txmcs_map & 0xffff);
he_cap->he_mcs_nss_supp.rx_mcs_80p80 =
cpu_to_le16(rxmcs_map & 0xffff);
he_cap->he_mcs_nss_supp.tx_mcs_80p80 =
cpu_to_le16(txmcs_map & 0xffff);
}
static int ath11k_mac_copy_he_cap(struct ath11k *ar,
struct ath11k_pdev_cap *cap,
struct ieee80211_sband_iftype_data *data,
int band)
{
int i, idx = 0;
for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap;
struct ath11k_band_cap *band_cap = &cap->band[band];
struct ieee80211_he_cap_elem *he_cap_elem =
&he_cap->he_cap_elem;
switch (i) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
break;
default:
continue;
}
data[idx].types_mask = BIT(i);
he_cap->has_he = true;
memcpy(he_cap_elem->mac_cap_info, band_cap->he_cap_info,
sizeof(he_cap_elem->mac_cap_info));
memcpy(he_cap_elem->phy_cap_info, band_cap->he_cap_phy_info,
sizeof(he_cap_elem->phy_cap_info));
he_cap_elem->mac_cap_info[1] &=
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK;
he_cap_elem->phy_cap_info[5] &=
~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK;
he_cap_elem->phy_cap_info[5] |= ar->num_tx_chains - 1;
switch (i) {
case NL80211_IFTYPE_AP:
he_cap_elem->phy_cap_info[3] &=
~IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK;
he_cap_elem->phy_cap_info[9] |=
IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
break;
case NL80211_IFTYPE_STATION:
he_cap_elem->mac_cap_info[0] &=
~IEEE80211_HE_MAC_CAP0_TWT_RES;
he_cap_elem->mac_cap_info[0] |=
IEEE80211_HE_MAC_CAP0_TWT_REQ;
he_cap_elem->phy_cap_info[9] |=
IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
break;
case NL80211_IFTYPE_MESH_POINT:
ath11k_mac_filter_he_cap_mesh(he_cap_elem);
break;
}
ath11k_mac_set_hemcsmap(ar, cap, he_cap, band);
memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
if (he_cap_elem->phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)
ath11k_gen_ppe_thresh(&band_cap->he_ppet,
he_cap->ppe_thres);
if (band == NL80211_BAND_6GHZ) {
data[idx].he_6ghz_capa.capa =
ath11k_mac_setup_he_6ghz_cap(cap, band_cap);
}
idx++;
}
return idx;
}
static void ath11k_mac_setup_he_cap(struct ath11k *ar,
struct ath11k_pdev_cap *cap)
{
struct ieee80211_supported_band *band;
int count;
if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
count = ath11k_mac_copy_he_cap(ar, cap,
ar->mac.iftype[NL80211_BAND_2GHZ],
NL80211_BAND_2GHZ);
band = &ar->mac.sbands[NL80211_BAND_2GHZ];
band->iftype_data = ar->mac.iftype[NL80211_BAND_2GHZ];
band->n_iftype_data = count;
}
if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) {
count = ath11k_mac_copy_he_cap(ar, cap,
ar->mac.iftype[NL80211_BAND_5GHZ],
NL80211_BAND_5GHZ);
band = &ar->mac.sbands[NL80211_BAND_5GHZ];
band->iftype_data = ar->mac.iftype[NL80211_BAND_5GHZ];
band->n_iftype_data = count;
}
if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
ar->supports_6ghz) {
count = ath11k_mac_copy_he_cap(ar, cap,
ar->mac.iftype[NL80211_BAND_6GHZ],
NL80211_BAND_6GHZ);
band = &ar->mac.sbands[NL80211_BAND_6GHZ];
band->iftype_data = ar->mac.iftype[NL80211_BAND_6GHZ];
band->n_iftype_data = count;
}
}
static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
if (ath11k_check_chain_mask(ar, tx_ant, true))
return -EINVAL;
if (ath11k_check_chain_mask(ar, rx_ant, false))
return -EINVAL;
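/* Cache the requested chainmasks; if the radio is not up yet they are
* only applied to firmware once it is started.
*/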
ar->cfg_tx_chainmask = tx_ant;
ar->cfg_rx_chainmask = rx_ant;
if (ar->state != ATH11K_STATE_ON &&
ar->state != ATH11K_STATE_RESTARTED)
return 0;
ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK,
tx_ant, ar->pdev->pdev_id);
if (ret) {
ath11k_warn(ar->ab, "failed to set tx-chainmask: %d, req 0x%x\n",
ret, tx_ant);
return ret;
}
ar->num_tx_chains = get_num_chains(tx_ant);
ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RX_CHAIN_MASK,
rx_ant, ar->pdev->pdev_id);
if (ret) {
ath11k_warn(ar->ab, "failed to set rx-chainmask: %d, req 0x%x\n",
ret, rx_ant);
return ret;
}
ar->num_rx_chains = get_num_chains(rx_ant);
/* Reload HT/VHT/HE capability */
ath11k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL);
ath11k_mac_setup_he_cap(ar, &ar->pdev->cap);
return 0;
}
static void ath11k_mgmt_over_wmi_tx_drop(struct ath11k *ar, struct sk_buff *skb)
{
int num_mgmt;
ieee80211_free_txskb(ar->hw, skb);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
if (num_mgmt < 0)
WARN_ON_ONCE(1);
if (!num_mgmt)
wake_up(&ar->txmgmt_empty_waitq);
}
static void ath11k_mac_tx_mgmt_free(struct ath11k *ar, int buf_id)
{
struct sk_buff *msdu;
struct ieee80211_tx_info *info;
spin_lock_bh(&ar->txmgmt_idr_lock);
msdu = idr_remove(&ar->txmgmt_idr, buf_id);
spin_unlock_bh(&ar->txmgmt_idr_lock);
if (!msdu)
return;
dma_unmap_single(ar->ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
ath11k_mgmt_over_wmi_tx_drop(ar, msdu);
}
int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
{
struct ath11k *ar = ctx;
ath11k_mac_tx_mgmt_free(ar, buf_id);
return 0;
}
static int ath11k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx)
{
struct ieee80211_vif *vif = ctx;
struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB((struct sk_buff *)skb);
struct ath11k *ar = skb_cb->ar;
if (skb_cb->vif == vif)
ath11k_mac_tx_mgmt_free(ar, buf_id);
return 0;
}
static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
struct sk_buff *skb)
{
struct ath11k_base *ab = ar->ab;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info;
dma_addr_t paddr;
int buf_id;
int ret;
ATH11K_SKB_CB(skb)->ar = ar;
spin_lock_bh(&ar->txmgmt_idr_lock);
buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
ATH11K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC);
spin_unlock_bh(&ar->txmgmt_idr_lock);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"tx mgmt frame, buf id %d\n", buf_id);
if (buf_id < 0)
return -ENOSPC;
info = IEEE80211_SKB_CB(skb);
if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
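/* Protected action/deauth/disassoc frames go out via WMI in clear
* text; firmware is expected to add the MIC, so extend the skb to
* reserve room for it.
*/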
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
ieee80211_has_protected(hdr->frame_control)) {
skb_put(skb, IEEE80211_CCMP_MIC_LEN);
}
}
paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, paddr)) {
ath11k_warn(ab, "failed to DMA map mgmt Tx buffer\n");
ret = -EIO;
goto err_free_idr;
}
ATH11K_SKB_CB(skb)->paddr = paddr;
ret = ath11k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb);
if (ret) {
ath11k_warn(ar->ab, "failed to send mgmt frame: %d\n", ret);
goto err_unmap_buf;
}
return 0;
err_unmap_buf:
dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr,
skb->len, DMA_TO_DEVICE);
err_free_idr:
spin_lock_bh(&ar->txmgmt_idr_lock);
idr_remove(&ar->txmgmt_idr, buf_id);
spin_unlock_bh(&ar->txmgmt_idr_lock);
return ret;
}
static void ath11k_mgmt_over_wmi_tx_purge(struct ath11k *ar)
{
struct sk_buff *skb;
while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL)
ath11k_mgmt_over_wmi_tx_drop(ar, skb);
}
static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
{
struct ath11k *ar = container_of(work, struct ath11k, wmi_mgmt_tx_work);
struct ath11k_skb_cb *skb_cb;
struct ath11k_vif *arvif;
struct sk_buff *skb;
int ret;
while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
skb_cb = ATH11K_SKB_CB(skb);
if (!skb_cb->vif) {
ath11k_warn(ar->ab, "no vif found for mgmt frame\n");
ath11k_mgmt_over_wmi_tx_drop(ar, skb);
continue;
}
arvif = ath11k_vif_to_arvif(skb_cb->vif);
mutex_lock(&ar->conf_mutex);
if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) {
ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
if (ret) {
ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
arvif->vdev_id, ret);
ath11k_mgmt_over_wmi_tx_drop(ar, skb);
} else {
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"tx mgmt frame, vdev_id %d\n",
arvif->vdev_id);
}
} else {
ath11k_warn(ar->ab,
"dropping mgmt frame for vdev %d, is_started %d\n",
arvif->vdev_id,
arvif->is_started);
ath11k_mgmt_over_wmi_tx_drop(ar, skb);
}
mutex_unlock(&ar->conf_mutex);
}
}
static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb,
bool is_prb_rsp)
{
struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
/* Drop probe response packets when the pending management tx
* count has reached a certain threshold, so that other mgmt
* packets like auth and assoc frames are sent on time and
* connections can be established successfully.
*/
if (is_prb_rsp &&
atomic_read(&ar->num_pending_mgmt_tx) > ATH11K_PRB_RSP_DROP_THRESHOLD) {
ath11k_warn(ar->ab,
"dropping probe response as pending queue is almost full\n");
return -ENOSPC;
}
if (skb_queue_len_lockless(q) >= ATH11K_TX_MGMT_NUM_PENDING_MAX) {
ath11k_warn(ar->ab, "mgmt tx queue is full\n");
return -ENOSPC;
}
skb_queue_tail(q, skb);
atomic_inc(&ar->num_pending_mgmt_tx);
queue_work(ar->ab->workqueue_aux, &ar->wmi_mgmt_tx_work);
return 0;
}
static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
struct ath11k *ar = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key_conf *key = info->control.hw_key;
struct ath11k_sta *arsta = NULL;
u32 info_flags = info->flags;
bool is_prb_rsp;
int ret;
memset(skb_cb, 0, sizeof(*skb_cb));
skb_cb->vif = vif;
if (key) {
skb_cb->cipher = key->cipher;
skb_cb->flags |= ATH11K_SKB_CIPHER_SET;
}
if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
skb_cb->flags |= ATH11K_SKB_HW_80211_ENCAP;
} else if (ieee80211_is_mgmt(hdr->frame_control)) {
is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
ret = ath11k_mac_mgmt_tx(ar, skb, is_prb_rsp);
if (ret) {
ath11k_warn(ar->ab, "failed to queue management frame %d\n",
ret);
ieee80211_free_txskb(ar->hw, skb);
}
return;
}
if (control->sta)
arsta = (struct ath11k_sta *)control->sta->drv_priv;
ret = ath11k_dp_tx(ar, arvif, arsta, skb);
if (unlikely(ret)) {
ath11k_warn(ar->ab, "failed to transmit frame %d\n", ret);
ieee80211_free_txskb(ar->hw, skb);
}
}
void ath11k_mac_drain_tx(struct ath11k *ar)
{
/* make sure rcu-protected mac80211 tx path itself is drained */
synchronize_net();
cancel_work_sync(&ar->wmi_mgmt_tx_work);
ath11k_mgmt_over_wmi_tx_purge(ar);
}
static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable)
{
struct htt_rx_ring_tlv_filter tlv_filter = {0};
struct ath11k_base *ab = ar->ab;
int i, ret = 0;
u32 ring_id;
if (enable) {
tlv_filter = ath11k_mac_mon_status_filter_default;
if (ath11k_debugfs_rx_filter(ar))
tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
}
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
ar->dp.mac_id + i,
HAL_RXDMA_MONITOR_STATUS,
DP_RX_BUFFER_SIZE,
&tlv_filter);
}
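/* Targets without a dedicated rxdma1 ring reap monitor status buffers
* from a periodic timer instead.
*/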
if (enable && !ar->ab->hw_params.rxdma1_enable)
mod_timer(&ar->ab->mon_reap_timer, jiffies +
msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
return ret;
}
static void ath11k_mac_wait_reconfigure(struct ath11k_base *ab)
{
int recovery_start_count;
if (!ab->is_reset)
return;
recovery_start_count = atomic_inc_return(&ab->recovery_start_count);
ath11k_dbg(ab, ATH11K_DBG_MAC, "recovery start count %d\n", recovery_start_count);
if (recovery_start_count == ab->num_radios) {
complete(&ab->recovery_start);
ath11k_dbg(ab, ATH11K_DBG_MAC, "recovery started success\n");
}
ath11k_dbg(ab, ATH11K_DBG_MAC, "waiting reconfigure...\n");
wait_for_completion_timeout(&ab->reconfigure_complete,
ATH11K_RECONFIGURE_TIMEOUT_HZ);
}
static int ath11k_mac_op_start(struct ieee80211_hw *hw)
{
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
struct ath11k_pdev *pdev = ar->pdev;
int ret;
if (ath11k_ftm_mode) {
ath11k_warn(ab, "mac operations not supported in factory test mode\n");
return -EOPNOTSUPP;
}
ath11k_mac_drain_tx(ar);
mutex_lock(&ar->conf_mutex);
switch (ar->state) {
case ATH11K_STATE_OFF:
ar->state = ATH11K_STATE_ON;
break;
case ATH11K_STATE_RESTARTING:
ar->state = ATH11K_STATE_RESTARTED;
ath11k_mac_wait_reconfigure(ab);
break;
case ATH11K_STATE_RESTARTED:
case ATH11K_STATE_WEDGED:
case ATH11K_STATE_ON:
case ATH11K_STATE_FTM:
WARN_ON(1);
ret = -EINVAL;
goto err;
}
ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS,
1, pdev->pdev_id);
if (ret) {
ath11k_err(ar->ab, "failed to enable PMF QOS: (%d\n", ret);
goto err;
}
ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
pdev->pdev_id);
if (ret) {
ath11k_err(ar->ab, "failed to enable dynamic bw: %d\n", ret);
goto err;
}
if (test_bit(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi->wmi_ab->svc_map)) {
ret = ath11k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
if (ret) {
ath11k_err(ab, "failed to set prob req oui: %i\n", ret);
goto err;
}
}
ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
0, pdev->pdev_id);
if (ret) {
ath11k_err(ab, "failed to set ac override for ARP: %d\n",
ret);
goto err;
}
ret = ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(ar, pdev->pdev_id);
if (ret) {
ath11k_err(ab, "failed to offload radar detection: %d\n",
ret);
goto err;
}
ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
HTT_PPDU_STATS_TAG_DEFAULT);
if (ret) {
ath11k_err(ab, "failed to req ppdu stats: %d\n", ret);
goto err;
}
ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
1, pdev->pdev_id);
if (ret) {
ath11k_err(ar->ab, "failed to enable MESH MCAST ENABLE: (%d\n", ret);
goto err;
}
__ath11k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
/* TODO: Do we need to enable ANI? */
ath11k_reg_update_chan_list(ar, false);
ar->num_started_vdevs = 0;
ar->num_created_vdevs = 0;
ar->num_peers = 0;
ar->allocated_vdev_map = 0;
/* Configure monitor status ring with default rx_filter to get rx status
* such as rssi, rx_duration.
*/
ret = ath11k_mac_config_mon_status_default(ar, true);
if (ret) {
ath11k_err(ab, "failed to configure monitor status ring with default rx_filter: (%d)\n",
ret);
goto err;
}
/* Configure the hash seed for hash based reo dest ring selection */
ath11k_wmi_pdev_lro_cfg(ar, ar->pdev->pdev_id);
/* allow device to enter IMPS */
if (ab->hw_params.idle_ps) {
ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_IDLE_PS_CONFIG,
1, pdev->pdev_id);
if (ret) {
ath11k_err(ab, "failed to enable idle ps: %d\n", ret);
goto err;
}
}
mutex_unlock(&ar->conf_mutex);
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
&ab->pdevs[ar->pdev_idx]);
return 0;
err:
ar->state = ATH11K_STATE_OFF;
mutex_unlock(&ar->conf_mutex);
return ret;
}
static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
{
struct ath11k *ar = hw->priv;
struct htt_ppdu_stats_info *ppdu_stats, *tmp;
int ret;
ath11k_mac_drain_tx(ar);
mutex_lock(&ar->conf_mutex);
ret = ath11k_mac_config_mon_status_default(ar, false);
if (ret)
ath11k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n",
ret);
clear_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
ar->state = ATH11K_STATE_OFF;
mutex_unlock(&ar->conf_mutex);
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ar->ab->update_11d_work);
if (ar->state_11d == ATH11K_11D_PREPARING) {
ar->state_11d = ATH11K_11D_IDLE;
complete(&ar->completed_11d_scan);
}
spin_lock_bh(&ar->data_lock);
list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
list_del(&ppdu_stats->list);
kfree(ppdu_stats);
}
spin_unlock_bh(&ar->data_lock);
rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL);
synchronize_rcu();
atomic_set(&ar->num_pending_mgmt_tx, 0);
}
static int ath11k_mac_setup_vdev_params_mbssid(struct ath11k_vif *arvif,
u32 *flags, u32 *tx_vdev_id)
{
struct ath11k *ar = arvif->ar;
struct ath11k_vif *tx_arvif;
struct ieee80211_vif *tx_vif;
*tx_vdev_id = 0;
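/* Work out the MBSSID role of this vdev: plain non-MBSSID AP,
* transmitting AP, or non-transmitting AP pointing at its tx vdev.
*/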
tx_vif = arvif->vif->mbssid_tx_vif;
if (!tx_vif) {
*flags = WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP;
return 0;
}
tx_arvif = ath11k_vif_to_arvif(tx_vif);
if (arvif->vif->bss_conf.nontransmitted) {
if (ar->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy)
return -EINVAL;
*flags = WMI_HOST_VDEV_FLAGS_NON_TRANSMIT_AP;
*tx_vdev_id = ath11k_vif_to_arvif(tx_vif)->vdev_id;
} else if (tx_arvif == arvif) {
*flags = WMI_HOST_VDEV_FLAGS_TRANSMIT_AP;
} else {
return -EINVAL;
}
if (arvif->vif->bss_conf.ema_ap)
*flags |= WMI_HOST_VDEV_FLAGS_EMA_MODE;
return 0;
}
static int ath11k_mac_setup_vdev_create_params(struct ath11k_vif *arvif,
struct vdev_create_params *params)
{
struct ath11k *ar = arvif->ar;
struct ath11k_pdev *pdev = ar->pdev;
int ret;
params->if_id = arvif->vdev_id;
params->type = arvif->vdev_type;
params->subtype = arvif->vdev_subtype;
params->pdev_id = pdev->pdev_id;
params->mbssid_flags = 0;
params->mbssid_tx_vdev_id = 0;
if (!test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT,
ar->ab->wmi_ab.svc_map)) {
ret = ath11k_mac_setup_vdev_params_mbssid(arvif,
¶ms->mbssid_flags,
¶ms->mbssid_tx_vdev_id);
if (ret)
return ret;
}
if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
params->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
params->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
}
if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
params->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
params->chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
}
if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP &&
ar->supports_6ghz) {
params->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains;
params->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains;
}
return 0;
}
static void ath11k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
u32 param_id, param_value;
int ret;
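/* Ethernet encap/decap offload is only usable on STA and AP vdevs and
* only when the driver runs in ethernet frame mode; otherwise clear
* the flags so mac80211 falls back to native wifi mode.
*/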
param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
if (ath11k_frame_mode != ATH11K_HW_TXRX_ETHERNET ||
(vif->type != NL80211_IFTYPE_STATION &&
vif->type != NL80211_IFTYPE_AP))
vif->offload_flags &= ~(IEEE80211_OFFLOAD_ENCAP_ENABLED |
IEEE80211_OFFLOAD_DECAP_ENABLED);
if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
param_value = ATH11K_HW_TXRX_ETHERNET;
else if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
param_value = ATH11K_HW_TXRX_RAW;
else
param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, param_value);
if (ret) {
ath11k_warn(ab, "failed to set vdev %d tx encap mode: %d\n",
arvif->vdev_id, ret);
vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
}
param_id = WMI_VDEV_PARAM_RX_DECAP_TYPE;
if (vif->offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED)
param_value = ATH11K_HW_TXRX_ETHERNET;
else if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
param_value = ATH11K_HW_TXRX_RAW;
else
param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, param_value);
if (ret) {
ath11k_warn(ab, "failed to set vdev %d rx decap mode: %d\n",
arvif->vdev_id, ret);
vif->offload_flags &= ~IEEE80211_OFFLOAD_DECAP_ENABLED;
}
}
static bool ath11k_mac_vif_ap_active_any(struct ath11k_base *ab)
{
struct ath11k *ar;
struct ath11k_pdev *pdev;
struct ath11k_vif *arvif;
int i;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_AP)
return true;
}
}
return false;
}
void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id)
{
struct wmi_11d_scan_start_params param;
int ret;
mutex_lock(&ar->ab->vdev_id_11d_lock);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev id for 11d scan %d\n",
ar->vdev_id_11d_scan);
if (ar->regdom_set_by_user)
goto fin;
if (ar->vdev_id_11d_scan != ATH11K_11D_INVALID_VDEV_ID)
goto fin;
if (!test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map))
goto fin;
if (ath11k_mac_vif_ap_active_any(ar->ab))
goto fin;
param.vdev_id = vdev_id;
param.start_interval_msec = 0;
param.scan_period_msec = ATH11K_SCAN_11D_INTERVAL;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "start 11d scan\n");
ret = ath11k_wmi_send_11d_scan_start_cmd(ar, ¶m);
if (ret) {
ath11k_warn(ar->ab, "failed to start 11d scan vdev %d ret: %d\n",
vdev_id, ret);
} else {
ar->vdev_id_11d_scan = vdev_id;
if (ar->state_11d == ATH11K_11D_PREPARING)
ar->state_11d = ATH11K_11D_RUNNING;
}
fin:
if (ar->state_11d == ATH11K_11D_PREPARING) {
ar->state_11d = ATH11K_11D_IDLE;
complete(&ar->completed_11d_scan);
}
mutex_unlock(&ar->ab->vdev_id_11d_lock);
}
void ath11k_mac_11d_scan_stop(struct ath11k *ar)
{
int ret;
u32 vdev_id;
if (!test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map))
return;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "stop 11d scan\n");
mutex_lock(&ar->ab->vdev_id_11d_lock);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "stop 11d vdev id %d\n",
ar->vdev_id_11d_scan);
if (ar->state_11d == ATH11K_11D_PREPARING) {
ar->state_11d = ATH11K_11D_IDLE;
complete(&ar->completed_11d_scan);
}
if (ar->vdev_id_11d_scan != ATH11K_11D_INVALID_VDEV_ID) {
vdev_id = ar->vdev_id_11d_scan;
ret = ath11k_wmi_send_11d_scan_stop_cmd(ar, vdev_id);
if (ret) {
ath11k_warn(ar->ab,
"failed to stopt 11d scan vdev %d ret: %d\n",
vdev_id, ret);
} else {
ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
ar->state_11d = ATH11K_11D_IDLE;
complete(&ar->completed_11d_scan);
}
}
mutex_unlock(&ar->ab->vdev_id_11d_lock);
}
void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab)
{
struct ath11k *ar;
struct ath11k_pdev *pdev;
int i;
ath11k_dbg(ab, ATH11K_DBG_MAC, "stop soc 11d scan\n");
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
ath11k_mac_11d_scan_stop(ar);
}
}
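/* Delete the firmware vdev and wait for the WMI delete response before
 * returning the vdev id to the free map and updating the vdev counts.
 */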
static int ath11k_mac_vdev_delete(struct ath11k *ar, struct ath11k_vif *arvif)
{
unsigned long time_left;
struct ieee80211_vif *vif = arvif->vif;
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->vdev_delete_done);
ret = ath11k_wmi_vdev_delete(ar, arvif->vdev_id);
if (ret) {
ath11k_warn(ar->ab, "failed to delete WMI vdev %d: %d\n",
arvif->vdev_id, ret);
return ret;
}
time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
ATH11K_VDEV_DELETE_TIMEOUT_HZ);
if (time_left == 0) {
ath11k_warn(ar->ab, "Timeout in receiving vdev delete response\n");
return -ETIMEDOUT;
}
ar->ab->free_vdev_map |= 1LL << (arvif->vdev_id);
ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
ar->num_created_vdevs--;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
vif->addr, arvif->vdev_id);
return ret;
}
static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct vdev_create_params vdev_param = {0};
struct peer_create_params peer_param;
u32 param_id, param_value;
u16 nss;
int i;
int ret, fbret;
int bit;
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
mutex_lock(&ar->conf_mutex);
if (vif->type == NL80211_IFTYPE_AP &&
ar->num_peers > (ar->max_num_peers - 1)) {
ath11k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
ret = -ENOBUFS;
goto err;
}
if (ar->num_created_vdevs > (TARGET_NUM_VDEVS(ab) - 1)) {
ath11k_warn(ab, "failed to create vdev %u, reached max vdev limit %d\n",
ar->num_created_vdevs, TARGET_NUM_VDEVS(ab));
ret = -EBUSY;
goto err;
}
/* In the case of hardware recovery, debugfs files are
* not deleted since ieee80211_ops.remove_interface() is
* not invoked. In such cases, try to delete the files.
* These will be re-created later.
*/
ath11k_debugfs_remove_interface(arvif);
memset(arvif, 0, sizeof(*arvif));
arvif->ar = ar;
arvif->vif = vif;
INIT_LIST_HEAD(&arvif->list);
INIT_DELAYED_WORK(&arvif->connection_loss_work,
ath11k_mac_vif_sta_connection_loss_work);
for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
arvif->bitrate_mask.control[i].legacy = 0xffffffff;
arvif->bitrate_mask.control[i].gi = NL80211_TXRATE_FORCE_SGI;
memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
sizeof(arvif->bitrate_mask.control[i].ht_mcs));
memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
sizeof(arvif->bitrate_mask.control[i].vht_mcs));
memset(arvif->bitrate_mask.control[i].he_mcs, 0xff,
sizeof(arvif->bitrate_mask.control[i].he_mcs));
}
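/* Pick the lowest free vdev id from the 64-bit free map; the bit is
 * cleared from the map once the WMI vdev has been created below.
 */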
bit = __ffs64(ab->free_vdev_map);
arvif->vdev_id = bit;
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
switch (vif->type) {
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_STATION:
arvif->vdev_type = WMI_VDEV_TYPE_STA;
break;
case NL80211_IFTYPE_MESH_POINT:
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
fallthrough;
case NL80211_IFTYPE_AP:
arvif->vdev_type = WMI_VDEV_TYPE_AP;
break;
case NL80211_IFTYPE_MONITOR:
arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
ar->monitor_vdev_id = bit;
break;
default:
WARN_ON(1);
break;
}
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "add interface id %d type %d subtype %d map %llx\n",
arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
ab->free_vdev_map);
vif->cab_queue = arvif->vdev_id % (ATH11K_HW_MAX_QUEUES - 1);
for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
vif->hw_queue[i] = i % (ATH11K_HW_MAX_QUEUES - 1);
ret = ath11k_mac_setup_vdev_create_params(arvif, &vdev_param);
if (ret) {
ath11k_warn(ab, "failed to create vdev parameters %d: %d\n",
arvif->vdev_id, ret);
goto err;
}
ret = ath11k_wmi_vdev_create(ar, vif->addr, &vdev_param);
if (ret) {
ath11k_warn(ab, "failed to create WMI vdev %d: %d\n",
arvif->vdev_id, ret);
goto err;
}
ar->num_created_vdevs++;
ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM created, vdev_id %d\n",
vif->addr, arvif->vdev_id);
ar->allocated_vdev_map |= 1LL << arvif->vdev_id;
ab->free_vdev_map &= ~(1LL << arvif->vdev_id);
spin_lock_bh(&ar->data_lock);
list_add(&arvif->list, &ar->arvifs);
spin_unlock_bh(&ar->data_lock);
ath11k_mac_op_update_vif_offload(hw, vif);
nss = get_num_chains(ar->cfg_tx_chainmask) ? : 1;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
WMI_VDEV_PARAM_NSS, nss);
if (ret) {
ath11k_warn(ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n",
arvif->vdev_id, ar->cfg_tx_chainmask, nss, ret);
goto err_vdev_del;
}
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_AP:
peer_param.vdev_id = arvif->vdev_id;
peer_param.peer_addr = vif->addr;
peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
ret = ath11k_peer_create(ar, arvif, NULL, &peer_param);
if (ret) {
ath11k_warn(ab, "failed to vdev %d create peer for AP: %d\n",
arvif->vdev_id, ret);
goto err_vdev_del;
}
ret = ath11k_mac_set_kickout(arvif);
if (ret) {
ath11k_warn(ar->ab, "failed to set vdev %i kickout parameters: %d\n",
arvif->vdev_id, ret);
goto err_peer_del;
}
ath11k_mac_11d_scan_stop_all(ar->ab);
break;
case WMI_VDEV_TYPE_STA:
param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
param_value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param_id, param_value);
if (ret) {
ath11k_warn(ar->ab, "failed to set vdev %d RX wake policy: %d\n",
arvif->vdev_id, ret);
goto err_peer_del;
}
param_id = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
param_value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param_id, param_value);
if (ret) {
ath11k_warn(ar->ab, "failed to set vdev %d TX wake threshold: %d\n",
arvif->vdev_id, ret);
goto err_peer_del;
}
param_id = WMI_STA_PS_PARAM_PSPOLL_COUNT;
param_value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param_id, param_value);
if (ret) {
ath11k_warn(ar->ab, "failed to set vdev %d pspoll count: %d\n",
arvif->vdev_id, ret);
goto err_peer_del;
}
ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id,
WMI_STA_PS_MODE_DISABLED);
if (ret) {
ath11k_warn(ar->ab, "failed to disable vdev %d ps mode: %d\n",
arvif->vdev_id, ret);
goto err_peer_del;
}
if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ab->wmi_ab.svc_map)) {
reinit_completion(&ar->completed_11d_scan);
ar->state_11d = ATH11K_11D_PREPARING;
}
break;
case WMI_VDEV_TYPE_MONITOR:
set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
break;
default:
break;
}
arvif->txpower = vif->bss_conf.txpower;
ret = ath11k_mac_txpower_recalc(ar);
if (ret)
goto err_peer_del;
param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
param_value = ar->hw->wiphy->rts_threshold;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, param_value);
if (ret) {
ath11k_warn(ar->ab, "failed to set rts threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
}
ath11k_dp_vdev_tx_attach(ar, arvif);
ath11k_debugfs_add_interface(arvif);
if (vif->type != NL80211_IFTYPE_MONITOR &&
test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) {
ret = ath11k_mac_monitor_vdev_create(ar);
if (ret)
ath11k_warn(ar->ab, "failed to create monitor vdev during add interface: %d",
ret);
}
mutex_unlock(&ar->conf_mutex);
return 0;
err_peer_del:
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
fbret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr);
if (fbret) {
ath11k_warn(ar->ab, "fallback fail to delete peer addr %pM vdev_id %d ret %d\n",
vif->addr, arvif->vdev_id, fbret);
goto err;
}
}
err_vdev_del:
ath11k_mac_vdev_delete(ar, arvif);
spin_lock_bh(&ar->data_lock);
list_del(&arvif->list);
spin_unlock_bh(&ar->data_lock);
err:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static int ath11k_mac_vif_unref(int buf_id, void *skb, void *ctx)
{
struct ieee80211_vif *vif = (struct ieee80211_vif *)ctx;
struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB((struct sk_buff *)skb);
if (skb_cb->vif == vif)
skb_cb->vif = NULL;
return 0;
}
static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_base *ab = ar->ab;
int ret;
int i;
cancel_delayed_work_sync(&arvif->connection_loss_work);
mutex_lock(&ar->conf_mutex);
ath11k_dbg(ab, ATH11K_DBG_MAC, "remove interface (vdev %d)\n",
arvif->vdev_id);
ret = ath11k_spectral_vif_stop(arvif);
if (ret)
ath11k_warn(ab, "failed to stop spectral for vdev %i: %d\n",
arvif->vdev_id, ret);
if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
ath11k_mac_11d_scan_stop(ar);
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr);
if (ret)
ath11k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n",
arvif->vdev_id, ret);
}
ret = ath11k_mac_vdev_delete(ar, arvif);
if (ret) {
ath11k_warn(ab, "failed to delete vdev %d: %d\n",
arvif->vdev_id, ret);
goto err_vdev_del;
}
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
ar->monitor_vdev_id = -1;
} else if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags) &&
!test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) {
ret = ath11k_mac_monitor_vdev_delete(ar);
if (ret)
/* continue even if there's an error */
ath11k_warn(ar->ab, "failed to delete vdev monitor during remove interface: %d",
ret);
}
err_vdev_del:
spin_lock_bh(&ar->data_lock);
list_del(&arvif->list);
spin_unlock_bh(&ar->data_lock);
ath11k_peer_cleanup(ar, arvif->vdev_id);
idr_for_each(&ar->txmgmt_idr,
ath11k_mac_vif_txmgmt_idr_remove, vif);
for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
spin_lock_bh(&ab->dp.tx_ring[i].tx_idr_lock);
idr_for_each(&ab->dp.tx_ring[i].txbuf_idr,
ath11k_mac_vif_unref, vif);
spin_unlock_bh(&ab->dp.tx_ring[i].tx_idr_lock);
}
/* Recalc txpower for remaining vdev */
ath11k_mac_txpower_recalc(ar);
ath11k_debugfs_remove_interface(arvif);
/* TODO: recalc traffic pause state based on the available vdevs */
mutex_unlock(&ar->conf_mutex);
}
/* FIXME: Has to be verified. */
#define SUPPORTED_FILTERS \
(FIF_ALLMULTI | \
FIF_CONTROL | \
FIF_PSPOLL | \
FIF_OTHER_BSS | \
FIF_BCN_PRBRESP_PROMISC | \
FIF_PROBE_REQ | \
FIF_FCSFAIL)
static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags,
u64 multicast)
{
struct ath11k *ar = hw->priv;
mutex_lock(&ar->conf_mutex);
*total_flags &= SUPPORTED_FILTERS;
ar->filter_flags = *total_flags;
mutex_unlock(&ar->conf_mutex);
}
static int ath11k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
struct ath11k *ar = hw->priv;
mutex_lock(&ar->conf_mutex);
*tx_ant = ar->cfg_tx_chainmask;
*rx_ant = ar->cfg_rx_chainmask;
mutex_unlock(&ar->conf_mutex);
return 0;
}
static int ath11k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
{
struct ath11k *ar = hw->priv;
int ret;
mutex_lock(&ar->conf_mutex);
ret = __ath11k_set_antenna(ar, tx_ant, rx_ant);
mutex_unlock(&ar->conf_mutex);
return ret;
}
static int ath11k_mac_op_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
{
struct ath11k *ar = hw->priv;
int ret = -EINVAL;
mutex_lock(&ar->conf_mutex);
switch (params->action) {
case IEEE80211_AMPDU_RX_START:
ret = ath11k_dp_rx_ampdu_start(ar, params);
break;
case IEEE80211_AMPDU_RX_STOP:
ret = ath11k_dp_rx_ampdu_stop(ar, params);
break;
case IEEE80211_AMPDU_TX_START:
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
case IEEE80211_AMPDU_TX_OPERATIONAL:
/* Tx A-MPDU aggregation offloaded to hw/fw so deny mac80211
* Tx aggregation requests.
*/
ret = -EOPNOTSUPP;
break;
}
mutex_unlock(&ar->conf_mutex);
return ret;
}
static int ath11k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
ath11k_dbg(ab, ATH11K_DBG_MAC,
"chanctx add freq %u width %d ptr %p\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
/* TODO: In case of multiple channel context, populate rx_channel from
* Rx PPDU desc information.
*/
ar->rx_channel = ctx->def.chan;
spin_unlock_bh(&ar->data_lock);
mutex_unlock(&ar->conf_mutex);
return 0;
}
static void ath11k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
ath11k_dbg(ab, ATH11K_DBG_MAC,
"chanctx remove freq %u width %d ptr %p\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
/* TODO: In case there is one more channel context left, populate
* rx_channel with the channel of that remaining channel context.
*/
ar->rx_channel = NULL;
spin_unlock_bh(&ar->data_lock);
mutex_unlock(&ar->conf_mutex);
}
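/* Common helper for vdev start and restart: build the WMI vdev start
 * request from the channel context and wait for the setup-done event
 * from firmware.
 */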
static int
ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
struct ieee80211_chanctx_conf *ctx,
bool restart)
{
struct ath11k *ar = arvif->ar;
struct ath11k_base *ab = ar->ab;
struct wmi_vdev_start_req_arg arg = {};
const struct cfg80211_chan_def *chandef = &ctx->def;
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->vdev_setup_done);
arg.vdev_id = arvif->vdev_id;
arg.dtim_period = arvif->dtim_period;
arg.bcn_intval = arvif->beacon_interval;
arg.channel.freq = chandef->chan->center_freq;
arg.channel.band_center_freq1 = chandef->center_freq1;
arg.channel.band_center_freq2 = chandef->center_freq2;
arg.channel.mode =
ath11k_phymodes[chandef->chan->band][chandef->width];
arg.channel.min_power = 0;
arg.channel.max_power = chandef->chan->max_power;
arg.channel.max_reg_power = chandef->chan->max_reg_power;
arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain;
arg.pref_tx_streams = ar->num_tx_chains;
arg.pref_rx_streams = ar->num_rx_chains;
arg.mbssid_flags = 0;
arg.mbssid_tx_vdev_id = 0;
if (test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT,
ar->ab->wmi_ab.svc_map)) {
ret = ath11k_mac_setup_vdev_params_mbssid(arvif,
&arg.mbssid_flags,
&arg.mbssid_tx_vdev_id);
if (ret)
return ret;
}
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
arg.ssid = arvif->u.ap.ssid;
arg.ssid_len = arvif->u.ap.ssid_len;
arg.hidden_ssid = arvif->u.ap.hidden_ssid;
/* For now allow DFS for AP mode */
arg.channel.chan_radar =
!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
arg.channel.freq2_radar = ctx->radar_enabled;
arg.channel.passive = arg.channel.chan_radar;
spin_lock_bh(&ab->base_lock);
arg.regdomain = ar->ab->dfs_region;
spin_unlock_bh(&ab->base_lock);
}
arg.channel.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
ath11k_dbg(ab, ATH11K_DBG_MAC,
"vdev %d start center_freq %d phymode %s\n",
arg.vdev_id, arg.channel.freq,
ath11k_wmi_phymode_str(arg.channel.mode));
ret = ath11k_wmi_vdev_start(ar, &arg, restart);
if (ret) {
ath11k_warn(ar->ab, "failed to %s WMI vdev %i\n",
restart ? "restart" : "start", arg.vdev_id);
return ret;
}
ret = ath11k_mac_vdev_setup_sync(ar);
if (ret) {
ath11k_warn(ab, "failed to synchronize setup for vdev %i %s: %d\n",
arg.vdev_id, restart ? "restart" : "start", ret);
return ret;
}
if (!restart)
ar->num_started_vdevs++;
ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM started, vdev_id %d\n",
arvif->vif->addr, arvif->vdev_id);
/* Enable the CAC flag in the driver by checking the channel's DFS CAC
 * time (dfs_cac_ms), which is valid only for radar channels, and the
 * channel state NL80211_DFS_USABLE, which indicates that CAC must be
 * done before the channel can be used. This flag is used to drop rx
 * packets during CAC.
 */
/* TODO Set the flag for other interface types as required */
if (arvif->vdev_type == WMI_VDEV_TYPE_AP &&
chandef->chan->dfs_cac_ms &&
chandef->chan->dfs_state == NL80211_DFS_USABLE) {
set_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
ath11k_dbg(ab, ATH11K_DBG_MAC,
"CAC Started in chan_freq %d for vdev %d\n",
arg.channel.freq, arg.vdev_id);
}
ret = ath11k_mac_set_txbf_conf(arvif);
if (ret)
ath11k_warn(ab, "failed to set txbf conf for vdev %d: %d\n",
arvif->vdev_id, ret);
return 0;
}
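/* Stop the firmware vdev, wait for the setup-done event and clear the
 * CAC running flag if a CAC was in progress on this channel.
 */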
static int ath11k_mac_vdev_stop(struct ath11k_vif *arvif)
{
struct ath11k *ar = arvif->ar;
int ret;
lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->vdev_setup_done);
ret = ath11k_wmi_vdev_stop(ar, arvif->vdev_id);
if (ret) {
ath11k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
arvif->vdev_id, ret);
goto err;
}
ret = ath11k_mac_vdev_setup_sync(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
arvif->vdev_id, ret);
goto err;
}
WARN_ON(ar->num_started_vdevs == 0);
ar->num_started_vdevs--;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
arvif->vif->addr, arvif->vdev_id);
if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
clear_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "CAC Stopped for vdev %d\n",
arvif->vdev_id);
}
return 0;
err:
return ret;
}
static int ath11k_mac_vdev_start(struct ath11k_vif *arvif,
struct ieee80211_chanctx_conf *ctx)
{
return ath11k_mac_vdev_start_restart(arvif, ctx, false);
}
static int ath11k_mac_vdev_restart(struct ath11k_vif *arvif,
struct ieee80211_chanctx_conf *ctx)
{
return ath11k_mac_vdev_start_restart(arvif, ctx, true);
}
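/* Two-pass iteration over the active interfaces bound to a channel
 * context: the first pass only counts the matching vifs, the second
 * pass fills the vif_chanctx_switch array once it has been allocated.
 */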
struct ath11k_mac_change_chanctx_arg {
struct ieee80211_chanctx_conf *ctx;
struct ieee80211_vif_chanctx_switch *vifs;
int n_vifs;
int next_vif;
};
static void
ath11k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct ath11k_mac_change_chanctx_arg *arg = data;
if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx)
return;
arg->n_vifs++;
}
static void
ath11k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct ath11k_mac_change_chanctx_arg *arg = data;
struct ieee80211_chanctx_conf *ctx;
ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf);
if (ctx != arg->ctx)
return;
if (WARN_ON(arg->next_vif == arg->n_vifs))
return;
arg->vifs[arg->next_vif].vif = vif;
arg->vifs[arg->next_vif].old_ctx = ctx;
arg->vifs[arg->next_vif].new_ctx = ctx;
arg->next_vif++;
}
static void
ath11k_mac_update_vif_chan(struct ath11k *ar,
struct ieee80211_vif_chanctx_switch *vifs,
int n_vifs)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif, *tx_arvif = NULL;
struct ieee80211_vif *mbssid_tx_vif;
int ret;
int i;
bool monitor_vif = false;
lockdep_assert_held(&ar->conf_mutex);
/* Associated channel resources of all relevant vdevs
* should be available for the channel switch now.
*/
/* TODO: Update ar->rx_channel */
for (i = 0; i < n_vifs; i++) {
arvif = ath11k_vif_to_arvif(vifs[i].vif);
if (WARN_ON(!arvif->is_started))
continue;
/* change_chanctx can be called even before vdev_up from
 * ieee80211_start_ap->ieee80211_vif_use_channel->
 * ieee80211_recalc_radar_chanctx.
 *
 * Firmware expects vdev_restart only if the vdev is up.
 * If the vdev is down it expects vdev_stop->vdev_start.
 */
if (arvif->is_up) {
ret = ath11k_mac_vdev_restart(arvif, vifs[i].new_ctx);
if (ret) {
ath11k_warn(ab, "failed to restart vdev %d: %d\n",
arvif->vdev_id, ret);
continue;
}
} else {
ret = ath11k_mac_vdev_stop(arvif);
if (ret) {
ath11k_warn(ab, "failed to stop vdev %d: %d\n",
arvif->vdev_id, ret);
continue;
}
ret = ath11k_mac_vdev_start(arvif, vifs[i].new_ctx);
if (ret)
ath11k_warn(ab, "failed to start vdev %d: %d\n",
arvif->vdev_id, ret);
continue;
}
ret = ath11k_mac_setup_bcn_tmpl(arvif);
if (ret)
ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
ret);
mbssid_tx_vif = arvif->vif->mbssid_tx_vif;
if (mbssid_tx_vif)
tx_arvif = ath11k_vif_to_arvif(mbssid_tx_vif);
ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid,
tx_arvif ? tx_arvif->bssid : NULL,
arvif->vif->bss_conf.bssid_index,
1 << arvif->vif->bss_conf.bssid_indicator);
if (ret) {
ath11k_warn(ab, "failed to bring vdev up %d: %d\n",
arvif->vdev_id, ret);
continue;
}
}
/* Restart the internal monitor vdev on new channel */
if (!monitor_vif &&
test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
ret = ath11k_mac_monitor_stop(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to stop monitor during vif channel update: %d",
ret);
return;
}
ret = ath11k_mac_monitor_start(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to start monitor during vif channel update: %d",
ret);
return;
}
}
}
static void
ath11k_mac_update_active_vif_chan(struct ath11k *ar,
struct ieee80211_chanctx_conf *ctx)
{
struct ath11k_mac_change_chanctx_arg arg = { .ctx = ctx };
lockdep_assert_held(&ar->conf_mutex);
ieee80211_iterate_active_interfaces_atomic(ar->hw,
IEEE80211_IFACE_ITER_NORMAL,
ath11k_mac_change_chanctx_cnt_iter,
&arg);
if (arg.n_vifs == 0)
return;
arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), GFP_KERNEL);
if (!arg.vifs)
return;
ieee80211_iterate_active_interfaces_atomic(ar->hw,
IEEE80211_IFACE_ITER_NORMAL,
ath11k_mac_change_chanctx_fill_iter,
&arg);
ath11k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
kfree(arg.vifs);
}
static void ath11k_mac_op_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx,
u32 changed)
{
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
mutex_lock(&ar->conf_mutex);
ath11k_dbg(ab, ATH11K_DBG_MAC,
"chanctx change freq %u width %d ptr %p changed %x\n",
ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
/* This shouldn't really happen because channel switching should use
* switch_vif_chanctx().
*/
if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
goto unlock;
if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH ||
changed & IEEE80211_CHANCTX_CHANGE_RADAR)
ath11k_mac_update_active_vif_chan(ar, ctx);
/* TODO: Recalc radar detection */
unlock:
mutex_unlock(&ar->conf_mutex);
}
static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
int ret;
if (WARN_ON(arvif->is_started))
return -EBUSY;
ret = ath11k_mac_vdev_start(arvif, &arvif->chanctx);
if (ret) {
ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
arvif->vdev_id, vif->addr,
arvif->chanctx.def.chan->center_freq, ret);
return ret;
}
/* Reconfigure hardware rate code since it is cleared by firmware. */
if (ar->hw_rate_code > 0) {
u32 vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
ar->hw_rate_code);
if (ret) {
ath11k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
return ret;
}
}
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, 0, ar->mac_addr,
NULL, 0, 0);
if (ret) {
ath11k_warn(ab, "failed put monitor up: %d\n", ret);
return ret;
}
}
arvif->is_started = true;
/* TODO: Setup ps and cts/rts protection */
return 0;
}
static int
ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
int ret;
struct peer_create_params param;
mutex_lock(&ar->conf_mutex);
ath11k_dbg(ab, ATH11K_DBG_MAC,
"chanctx assign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
/* For targets with vdev_start_delay (e.g. QCA6390) the bss peer must
 * be created before vdev_start, so stash the channel context here and
 * defer the actual start to ath11k_start_vdev_delay().
 */
if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type != WMI_VDEV_TYPE_AP &&
arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
!ath11k_peer_find_by_vdev_id(ab, arvif->vdev_id)) {
memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
ret = 0;
goto out;
}
if (WARN_ON(arvif->is_started)) {
ret = -EBUSY;
goto out;
}
if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type != WMI_VDEV_TYPE_AP &&
arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
param.vdev_id = arvif->vdev_id;
param.peer_type = WMI_PEER_TYPE_DEFAULT;
param.peer_addr = ar->mac_addr;
ret = ath11k_peer_create(ar, arvif, NULL, &param);
if (ret) {
ath11k_warn(ab, "failed to create peer after vdev start delay: %d",
ret);
goto out;
}
}
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath11k_mac_monitor_start(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to start monitor during vif channel context assignment: %d",
ret);
goto out;
}
arvif->is_started = true;
goto out;
}
ret = ath11k_mac_vdev_start(arvif, ctx);
if (ret) {
ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
arvif->vdev_id, vif->addr,
ctx->def.chan->center_freq, ret);
goto out;
}
arvif->is_started = true;
if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
ret = ath11k_mac_monitor_start(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to start monitor during vif channel context assignment: %d",
ret);
goto out;
}
}
/* TODO: Setup ps and cts/rts protection */
ret = 0;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static void
ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_peer *peer;
int ret;
mutex_lock(&ar->conf_mutex);
ath11k_dbg(ab, ATH11K_DBG_MAC,
"chanctx unassign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
WARN_ON(!arvif->is_started);
if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_addr(ab, ar->mac_addr);
spin_unlock_bh(&ab->base_lock);
if (peer)
ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
}
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath11k_mac_monitor_stop(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to stop monitor during vif channel context unassignment: %d",
ret);
mutex_unlock(&ar->conf_mutex);
return;
}
arvif->is_started = false;
mutex_unlock(&ar->conf_mutex);
return;
}
ret = ath11k_mac_vdev_stop(arvif);
if (ret)
ath11k_warn(ab, "failed to stop vdev %i: %d\n",
arvif->vdev_id, ret);
arvif->is_started = false;
if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type == WMI_VDEV_TYPE_STA) {
ret = ath11k_peer_delete(ar, arvif->vdev_id, arvif->bssid);
if (ret)
ath11k_warn(ar->ab,
"failed to delete peer %pM for vdev %d: %d\n",
arvif->bssid, arvif->vdev_id, ret);
else
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"removed peer %pM vdev %d after vdev stop\n",
arvif->bssid, arvif->vdev_id);
}
if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
ath11k_wmi_vdev_down(ar, arvif->vdev_id);
if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
ar->num_started_vdevs == 1 &&
test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
ret = ath11k_mac_monitor_stop(ar);
if (ret)
/* continue even if there's an error */
ath11k_warn(ar->ab, "failed to stop monitor during vif channel context unassignment: %d",
ret);
}
if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
mutex_unlock(&ar->conf_mutex);
}
static int
ath11k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif_chanctx_switch *vifs,
int n_vifs,
enum ieee80211_chanctx_switch_mode mode)
{
struct ath11k *ar = hw->priv;
mutex_lock(&ar->conf_mutex);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"chanctx switch n_vifs %d mode %d\n",
n_vifs, mode);
ath11k_mac_update_vif_chan(ar, vifs, n_vifs);
mutex_unlock(&ar->conf_mutex);
return 0;
}
static int
ath11k_set_vdev_param_to_all_vifs(struct ath11k *ar, int param, u32 value)
{
struct ath11k_vif *arvif;
int ret = 0;
mutex_lock(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting mac vdev %d param %d value %d\n",
param, arvif->vdev_id, value);
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param, value);
if (ret) {
ath11k_warn(ar->ab, "failed to set param %d for vdev %d: %d\n",
param, arvif->vdev_id, ret);
break;
}
}
mutex_unlock(&ar->conf_mutex);
return ret;
}
/* mac80211 stores device specific RTS/Fragmentation threshold value,
* this is set interface specific to firmware from ath11k driver
*/
static int ath11k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct ath11k *ar = hw->priv;
int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
return ath11k_set_vdev_param_to_all_vifs(ar, param_id, value);
}
static int ath11k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
{
/* Even though there's a WMI vdev param for the fragmentation threshold,
 * no known firmware actually implements it. Moreover it is not possible
 * to rely on mac80211 for frame fragmentation because firmware clears
 * the "more fragments" bit in the frame control field, making it
 * impossible for remote devices to reassemble frames.
 *
 * Hence implement a dummy callback just to say fragmentation isn't
 * supported. This effectively prevents mac80211 from doing frame
 * fragmentation in software.
 */
return -EOPNOTSUPP;
}
static int ath11k_mac_flush_tx_complete(struct ath11k *ar)
{
long time_left;
int ret = 0;
time_left = wait_event_timeout(ar->dp.tx_empty_waitq,
(atomic_read(&ar->dp.num_tx_pending) == 0),
ATH11K_FLUSH_TIMEOUT);
if (time_left == 0) {
ath11k_warn(ar->ab, "failed to flush transmit queue, data pkts pending %d\n",
atomic_read(&ar->dp.num_tx_pending));
ret = -ETIMEDOUT;
}
time_left = wait_event_timeout(ar->txmgmt_empty_waitq,
(atomic_read(&ar->num_pending_mgmt_tx) == 0),
ATH11K_FLUSH_TIMEOUT);
if (time_left == 0) {
ath11k_warn(ar->ab, "failed to flush mgmt transmit queue, mgmt pkts pending %d\n",
atomic_read(&ar->num_pending_mgmt_tx));
ret = -ETIMEDOUT;
}
return ret;
}
int ath11k_mac_wait_tx_complete(struct ath11k *ar)
{
ath11k_mac_drain_tx(ar);
return ath11k_mac_flush_tx_complete(ar);
}
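/* When mac80211 asks for frames to be dropped there is nothing to wait
 * for, so return immediately; otherwise wait for the pending data and
 * management tx queues to drain.
 */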
static void ath11k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct ath11k *ar = hw->priv;
if (drop)
return;
ath11k_mac_flush_tx_complete(ar);
}
static bool
ath11k_mac_has_single_legacy_rate(struct ath11k *ar,
enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask)
{
int num_rates = 0;
num_rates = hweight32(mask->control[band].legacy);
if (ath11k_mac_bitrate_mask_num_ht_rates(ar, band, mask))
return false;
if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask))
return false;
if (ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask))
return false;
return num_rates == 1;
}
static __le16
ath11k_mac_get_tx_mcs_map(const struct ieee80211_sta_he_cap *he_cap)
{
if (he_cap->he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
return he_cap->he_mcs_nss_supp.tx_mcs_80p80;
if (he_cap->he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
return he_cap->he_mcs_nss_supp.tx_mcs_160;
return he_cap->he_mcs_nss_supp.tx_mcs_80;
}
static bool
ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar,
enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask,
int *nss)
{
struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
u16 he_mcs_map = 0;
u8 ht_nss_mask = 0;
u8 vht_nss_mask = 0;
u8 he_nss_mask = 0;
int i;
/* No need to consider legacy here. Basic rates are always present
* in bitrate mask
*/
for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
if (mask->control[band].ht_mcs[i] == 0)
continue;
else if (mask->control[band].ht_mcs[i] ==
sband->ht_cap.mcs.rx_mask[i])
ht_nss_mask |= BIT(i);
else
return false;
}
for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
if (mask->control[band].vht_mcs[i] == 0)
continue;
else if (mask->control[band].vht_mcs[i] ==
ath11k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
vht_nss_mask |= BIT(i);
else
return false;
}
he_mcs_map = le16_to_cpu(ath11k_mac_get_tx_mcs_map(&sband->iftype_data->he_cap));
for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) {
if (mask->control[band].he_mcs[i] == 0)
continue;
if (mask->control[band].he_mcs[i] ==
ath11k_mac_get_max_he_mcs_map(he_mcs_map, i))
he_nss_mask |= BIT(i);
else
return false;
}
if (ht_nss_mask != vht_nss_mask || ht_nss_mask != he_nss_mask)
return false;
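/* A valid single-NSS mask is a contiguous run of bits starting at NSS
 * 1; BIT(fls(mask)) - 1 reconstructs such a run for the comparison.
 */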
if (ht_nss_mask == 0)
return false;
if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
return false;
*nss = fls(ht_nss_mask);
return true;
}
static int
ath11k_mac_get_single_legacy_rate(struct ath11k *ar,
enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask,
u32 *rate, u8 *nss)
{
int rate_idx;
u16 bitrate;
u8 preamble;
u8 hw_rate;
if (hweight32(mask->control[band].legacy) != 1)
return -EINVAL;
rate_idx = ffs(mask->control[band].legacy) - 1;
if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ)
rate_idx += ATH11K_MAC_FIRST_OFDM_RATE_IDX;
hw_rate = ath11k_legacy_rates[rate_idx].hw_value;
bitrate = ath11k_legacy_rates[rate_idx].bitrate;
if (ath11k_mac_bitrate_is_cck(bitrate))
preamble = WMI_RATE_PREAMBLE_CCK;
else
preamble = WMI_RATE_PREAMBLE_OFDM;
*nss = 1;
*rate = ATH11K_HW_RATE_CODE(hw_rate, 0, preamble);
return 0;
}
static int
ath11k_mac_set_fixed_rate_gi_ltf(struct ath11k_vif *arvif, u8 he_gi, u8 he_ltf)
{
struct ath11k *ar = arvif->ar;
int ret;
/* nl80211 encodes an HE GI of 0.8/1.6/3.2 us as 0/1/2, while the
 * firmware expects 0.8 = 0, 1.6 = 2 and 3.2 = 3, so bump nonzero
 * values by one (0xFF means the GI is unset).
 */
if (he_gi && he_gi != 0xFF)
he_gi += 1;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
WMI_VDEV_PARAM_SGI, he_gi);
if (ret) {
ath11k_warn(ar->ab, "failed to set he gi %d: %d\n",
he_gi, ret);
return ret;
}
/* Firmware HE LTF values start from 1, so bump the nl80211 value by
 * one as well (0xFF means the LTF is unset).
 */
if (he_ltf != 0xFF)
he_ltf += 1;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
WMI_VDEV_PARAM_HE_LTF, he_ltf);
if (ret) {
ath11k_warn(ar->ab, "failed to set he ltf %d: %d\n",
he_ltf, ret);
return ret;
}
return 0;
}
static int
ath11k_mac_set_auto_rate_gi_ltf(struct ath11k_vif *arvif, u16 he_gi, u8 he_ltf)
{
struct ath11k *ar = arvif->ar;
int ret;
u32 he_ar_gi_ltf;
if (he_gi != 0xFF) {
switch (he_gi) {
case NL80211_RATE_INFO_HE_GI_0_8:
he_gi = WMI_AUTORATE_800NS_GI;
break;
case NL80211_RATE_INFO_HE_GI_1_6:
he_gi = WMI_AUTORATE_1600NS_GI;
break;
case NL80211_RATE_INFO_HE_GI_3_2:
he_gi = WMI_AUTORATE_3200NS_GI;
break;
default:
ath11k_warn(ar->ab, "invalid he gi: %d\n", he_gi);
return -EINVAL;
}
}
if (he_ltf != 0xFF) {
switch (he_ltf) {
case NL80211_RATE_INFO_HE_1XLTF:
he_ltf = WMI_HE_AUTORATE_LTF_1X;
break;
case NL80211_RATE_INFO_HE_2XLTF:
he_ltf = WMI_HE_AUTORATE_LTF_2X;
break;
case NL80211_RATE_INFO_HE_4XLTF:
he_ltf = WMI_HE_AUTORATE_LTF_4X;
break;
default:
ath11k_warn(ar->ab, "invalid he ltf: %d\n", he_ltf);
return -EINVAL;
}
}
he_ar_gi_ltf = he_gi | he_ltf;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
WMI_VDEV_PARAM_AUTORATE_MISC_CFG,
he_ar_gi_ltf);
if (ret) {
ath11k_warn(ar->ab,
"failed to set he autorate gi %u ltf %u: %d\n",
he_gi, he_ltf, ret);
return ret;
}
return 0;
}
static int ath11k_mac_set_rate_params(struct ath11k_vif *arvif,
u32 rate, u8 nss, u8 sgi, u8 ldpc,
u8 he_gi, u8 he_ltf, bool he_fixed_rate)
{
struct ath11k *ar = arvif->ar;
u32 vdev_param;
int ret;
lockdep_assert_held(&ar->conf_mutex);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"set rate params vdev %i rate 0x%02x nss 0x%02x sgi 0x%02x ldpc 0x%02x he_gi 0x%02x he_ltf 0x%02x he_fixed_rate %d\n",
arvif->vdev_id, rate, nss, sgi, ldpc, he_gi,
he_ltf, he_fixed_rate);
if (!arvif->vif->bss_conf.he_support) {
vdev_param = WMI_VDEV_PARAM_FIXED_RATE;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
vdev_param, rate);
if (ret) {
ath11k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n",
rate, ret);
return ret;
}
}
vdev_param = WMI_VDEV_PARAM_NSS;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
vdev_param, nss);
if (ret) {
ath11k_warn(ar->ab, "failed to set nss param %d: %d\n",
nss, ret);
return ret;
}
vdev_param = WMI_VDEV_PARAM_LDPC;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
vdev_param, ldpc);
if (ret) {
ath11k_warn(ar->ab, "failed to set ldpc param %d: %d\n",
ldpc, ret);
return ret;
}
if (arvif->vif->bss_conf.he_support) {
if (he_fixed_rate) {
ret = ath11k_mac_set_fixed_rate_gi_ltf(arvif, he_gi,
he_ltf);
if (ret) {
ath11k_warn(ar->ab, "failed to set fixed rate gi ltf: %d\n",
ret);
return ret;
}
} else {
ret = ath11k_mac_set_auto_rate_gi_ltf(arvif, he_gi,
he_ltf);
if (ret) {
ath11k_warn(ar->ab, "failed to set auto rate gi ltf: %d\n",
ret);
return ret;
}
}
} else {
vdev_param = WMI_VDEV_PARAM_SGI;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
vdev_param, sgi);
if (ret) {
ath11k_warn(ar->ab, "failed to set sgi param %d: %d\n",
sgi, ret);
return ret;
}
}
return 0;
}
static bool
ath11k_mac_vht_mcs_range_present(struct ath11k *ar,
enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask)
{
int i;
u16 vht_mcs;
for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
vht_mcs = mask->control[band].vht_mcs[i];
switch (vht_mcs) {
case 0:
case BIT(8) - 1:
case BIT(9) - 1:
case BIT(10) - 1:
break;
default:
return false;
}
}
return true;
}
static bool
ath11k_mac_he_mcs_range_present(struct ath11k *ar,
enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask)
{
int i;
u16 he_mcs;
for (i = 0; i < NL80211_HE_NSS_MAX; i++) {
he_mcs = mask->control[band].he_mcs[i];
switch (he_mcs) {
case 0:
case BIT(8) - 1:
case BIT(10) - 1:
case BIT(12) - 1:
break;
default:
return false;
}
}
return true;
}
static void ath11k_mac_set_bitrate_mask_iter(void *data,
struct ieee80211_sta *sta)
{
struct ath11k_vif *arvif = data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arvif->ar;
spin_lock_bh(&ar->data_lock);
arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
spin_unlock_bh(&ar->data_lock);
ieee80211_queue_work(ar->hw, &arsta->update_wk);
}
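/* Per-station iterator that clears any fixed rate previously set on
 * the peer so that a newly configured bitrate mask takes effect.
 */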
static void ath11k_mac_disable_peer_fixed_rate(void *data,
struct ieee80211_sta *sta)
{
struct ath11k_vif *arvif = data;
struct ath11k *ar = arvif->ar;
int ret;
ret = ath11k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id,
WMI_PEER_PARAM_FIXED_RATE,
WMI_FIXED_RATE_NONE);
if (ret)
ath11k_warn(ar->ab,
"failed to disable peer fixed rate for STA %pM ret %d\n",
sta->addr, ret);
}
static bool
ath11k_mac_validate_vht_he_fixed_rate_settings(struct ath11k *ar, enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask)
{
bool he_fixed_rate = false, vht_fixed_rate = false;
struct ath11k_peer *peer;
const u16 *vht_mcs_mask, *he_mcs_mask;
struct ieee80211_link_sta *deflink;
u8 vht_nss, he_nss;
bool ret = true;
vht_mcs_mask = mask->control[band].vht_mcs;
he_mcs_mask = mask->control[band].he_mcs;
if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask) == 1)
vht_fixed_rate = true;
if (ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask) == 1)
he_fixed_rate = true;
if (!vht_fixed_rate && !he_fixed_rate)
return true;
vht_nss = ath11k_mac_max_vht_nss(vht_mcs_mask);
he_nss = ath11k_mac_max_he_nss(he_mcs_mask);
rcu_read_lock();
spin_lock_bh(&ar->ab->base_lock);
list_for_each_entry(peer, &ar->ab->peers, list) {
if (peer->sta) {
deflink = &peer->sta->deflink;
if (vht_fixed_rate && (!deflink->vht_cap.vht_supported ||
deflink->rx_nss < vht_nss)) {
ret = false;
goto out;
}
if (he_fixed_rate && (!deflink->he_cap.has_he ||
deflink->rx_nss < he_nss)) {
ret = false;
goto out;
}
}
}
out:
spin_unlock_bh(&ar->ab->base_lock);
rcu_read_unlock();
return ret;
}
static int
ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask)
{
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct cfg80211_chan_def def;
struct ath11k_pdev_cap *cap;
struct ath11k *ar = arvif->ar;
enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
const u16 *he_mcs_mask;
u8 he_ltf = 0;
u8 he_gi = 0;
u32 rate;
u8 nss;
u8 sgi;
u8 ldpc;
int single_nss;
int ret;
int num_rates;
bool he_fixed_rate = false;
if (ath11k_mac_vif_chan(vif, &def))
return -EPERM;
band = def.chan->band;
cap = &ar->pdev->cap;
ht_mcs_mask = mask->control[band].ht_mcs;
vht_mcs_mask = mask->control[band].vht_mcs;
he_mcs_mask = mask->control[band].he_mcs;
ldpc = !!(cap->band[band].ht_cap_info & WMI_HT_CAP_TX_LDPC);
sgi = mask->control[band].gi;
if (sgi == NL80211_TXRATE_FORCE_LGI)
return -EINVAL;
he_gi = mask->control[band].he_gi;
he_ltf = mask->control[band].he_ltf;
/* mac80211 doesn't support sending a fixed HT/VHT MCS alone; rather it
 * requires passing at least one of the used basic rates along with
 * them. Fixed rate setting across different preambles (legacy, HT, VHT)
 * is not supported by the FW, hence the FIXED_RATE vdev param is not
 * suitable for setting single HT/VHT rates.
 * But a single basic rate passed from userspace can be set through the
 * FIXED_RATE param.
 */
if (ath11k_mac_has_single_legacy_rate(ar, band, mask)) {
ret = ath11k_mac_get_single_legacy_rate(ar, band, mask, &rate,
&nss);
if (ret) {
ath11k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
ieee80211_iterate_stations_atomic(ar->hw,
ath11k_mac_disable_peer_fixed_rate,
arvif);
} else if (ath11k_mac_bitrate_mask_get_single_nss(ar, band, mask,
&single_nss)) {
rate = WMI_FIXED_RATE_NONE;
nss = single_nss;
mutex_lock(&ar->conf_mutex);
arvif->bitrate_mask = *mask;
ieee80211_iterate_stations_atomic(ar->hw,
ath11k_mac_set_bitrate_mask_iter,
arvif);
mutex_unlock(&ar->conf_mutex);
} else {
rate = WMI_FIXED_RATE_NONE;
if (!ath11k_mac_validate_vht_he_fixed_rate_settings(ar, band, mask))
ath11k_warn(ar->ab,
"could not update fixed rate settings to all peers due to mcs/nss incompatibility\n");
nss = min_t(u32, ar->num_tx_chains,
max(max(ath11k_mac_max_ht_nss(ht_mcs_mask),
ath11k_mac_max_vht_nss(vht_mcs_mask)),
ath11k_mac_max_he_nss(he_mcs_mask)));
/* If multiple rates across different preambles are given
* we can reconfigure this info with all peers using PEER_ASSOC
* command with the below exception cases.
* - Single VHT Rate : peer_assoc command accommodates only MCS
* range values i.e 0-7, 0-8, 0-9 for VHT. Though mac80211
* mandates passing basic rates along with HT/VHT rates, FW
* doesn't allow switching from VHT to Legacy. Hence instead of
* setting legacy and VHT rates using RATEMASK_CMD vdev cmd,
* we could set this VHT rate as peer fixed rate param, which
* will override FIXED rate and FW rate control algorithm.
* If single VHT rate is passed along with HT rates, we select
* the VHT rate as fixed rate for vht peers.
* - Multiple VHT Rates : When multiple VHT rates are given, this
* can be set using RATEMASK CMD which uses FW rate-ctl alg.
* TODO: Setting multiple VHT MCS and replacing peer_assoc with
* RATEMASK_CMDID can cover all use cases of setting rates
* across multiple preambles and rates within same type.
* But requires more validation of the command at this point.
*/
num_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band,
mask);
if (!ath11k_mac_vht_mcs_range_present(ar, band, mask) &&
num_rates > 1) {
/* TODO: Handle multiple VHT MCS values setting using
* RATEMASK CMD
*/
ath11k_warn(ar->ab,
"setting %d mcs values in bitrate mask not supported\n",
num_rates);
return -EINVAL;
}
num_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band,
mask);
if (num_rates == 1)
he_fixed_rate = true;
if (!ath11k_mac_he_mcs_range_present(ar, band, mask) &&
num_rates > 1) {
ath11k_warn(ar->ab,
"Setting more than one HE MCS Value in bitrate mask not supported\n");
return -EINVAL;
}
mutex_lock(&ar->conf_mutex);
ieee80211_iterate_stations_atomic(ar->hw,
ath11k_mac_disable_peer_fixed_rate,
arvif);
arvif->bitrate_mask = *mask;
ieee80211_iterate_stations_atomic(ar->hw,
ath11k_mac_set_bitrate_mask_iter,
arvif);
mutex_unlock(&ar->conf_mutex);
}
mutex_lock(&ar->conf_mutex);
ret = ath11k_mac_set_rate_params(arvif, rate, nss, sgi, ldpc, he_gi,
he_ltf, he_fixed_rate);
if (ret) {
ath11k_warn(ar->ab, "failed to set rate params on vdev %i: %d\n",
arvif->vdev_id, ret);
}
mutex_unlock(&ar->conf_mutex);
return ret;
}
static void
ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type)
{
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
int recovery_count;
struct ath11k_vif *arvif;
if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
return;
mutex_lock(&ar->conf_mutex);
if (ar->state == ATH11K_STATE_RESTARTED) {
ath11k_warn(ar->ab, "pdev %d successfully recovered\n",
ar->pdev->pdev_id);
ar->state = ATH11K_STATE_ON;
ieee80211_wake_queues(ar->hw);
if (ar->ab->hw_params.current_cc_support &&
ar->alpha2[0] != 0 && ar->alpha2[1] != 0) {
struct wmi_set_current_country_params set_current_param = {};
memcpy(&set_current_param.alpha2, ar->alpha2, 2);
ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
}
if (ab->is_reset) {
recovery_count = atomic_inc_return(&ab->recovery_count);
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"recovery count %d\n", recovery_count);
/* When there are multiple radios in an SOC,
* the recovery has to be done for each radio
*/
if (recovery_count == ab->num_radios) {
atomic_dec(&ab->reset_count);
complete(&ab->reset_complete);
ab->is_reset = false;
atomic_set(&ab->fail_cont_count, 0);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset success\n");
}
}
if (ar->ab->hw_params.support_fw_mac_sequence) {
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA)
ieee80211_hw_restart_disconnect(arvif->vif);
}
}
}
mutex_unlock(&ar->conf_mutex);
}
static void
ath11k_mac_update_bss_chan_survey(struct ath11k *ar,
struct ieee80211_channel *channel)
{
int ret;
enum wmi_bss_chan_info_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
lockdep_assert_held(&ar->conf_mutex);
if (!test_bit(WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64, ar->ab->wmi_ab.svc_map) ||
ar->rx_channel != channel)
return;
if (ar->scan.state != ATH11K_SCAN_IDLE) {
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"ignoring bss chan info req while scanning..\n");
return;
}
reinit_completion(&ar->bss_survey_done);
ret = ath11k_wmi_pdev_bss_chan_info_request(ar, type);
if (ret) {
ath11k_warn(ar->ab, "failed to send pdev bss chan info request\n");
return;
}
ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
if (ret == 0)
ath11k_warn(ar->ab, "bss channel survey timed out\n");
}
static int ath11k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
struct ath11k *ar = hw->priv;
struct ieee80211_supported_band *sband;
struct survey_info *ar_survey;
int ret = 0;
if (idx >= ATH11K_NUM_CHANS)
return -ENOENT;
ar_survey = &ar->survey[idx];
mutex_lock(&ar->conf_mutex);
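/* The survey index is global across bands: walk the 2 GHz, 5 GHz and
 * 6 GHz bands in order, subtracting each band's channel count until
 * the index falls within a band.
 */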
sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
if (sband && idx >= sband->n_channels) {
idx -= sband->n_channels;
sband = NULL;
}
if (!sband)
sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
if (sband && idx >= sband->n_channels) {
idx -= sband->n_channels;
sband = NULL;
}
if (!sband)
sband = hw->wiphy->bands[NL80211_BAND_6GHZ];
if (!sband || idx >= sband->n_channels) {
ret = -ENOENT;
goto exit;
}
ath11k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
spin_lock_bh(&ar->data_lock);
memcpy(survey, ar_survey, sizeof(*survey));
spin_unlock_bh(&ar->data_lock);
survey->channel = &sband->channels[idx];
if (ar->rx_channel == survey->channel)
survey->filled |= SURVEY_INFO_IN_USE;
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static void ath11k_mac_put_chain_rssi(struct station_info *sinfo,
struct ath11k_sta *arsta,
char *pre,
bool clear)
{
struct ath11k *ar = arsta->arvif->ar;
int i;
s8 rssi;
for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
sinfo->chains &= ~BIT(i);
rssi = arsta->chain_signal[i];
if (clear)
arsta->chain_signal[i] = ATH11K_INVALID_RSSI_FULL;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"sta statistics %s rssi[%d] %d\n", pre, i, rssi);
if (rssi != ATH11K_DEFAULT_NOISE_FLOOR &&
rssi != ATH11K_INVALID_RSSI_FULL &&
rssi != ATH11K_INVALID_RSSI_EMPTY &&
rssi != 0) {
sinfo->chain_signal[i] = rssi;
sinfo->chains |= BIT(i);
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
}
}
}
static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct station_info *sinfo)
{
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
s8 signal;
bool db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
ar->ab->wmi_ab.svc_map);
sinfo->rx_duration = arsta->rx_duration;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
sinfo->tx_duration = arsta->tx_duration;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
if (arsta->txrate.legacy || arsta->txrate.nss) {
if (arsta->txrate.legacy) {
sinfo->txrate.legacy = arsta->txrate.legacy;
} else {
sinfo->txrate.mcs = arsta->txrate.mcs;
sinfo->txrate.nss = arsta->txrate.nss;
sinfo->txrate.bw = arsta->txrate.bw;
sinfo->txrate.he_gi = arsta->txrate.he_gi;
sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc;
}
sinfo->txrate.flags = arsta->txrate.flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
ath11k_mac_put_chain_rssi(sinfo, arsta, "ppdu", false);
if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) &&
arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA &&
ar->ab->hw_params.supports_rssi_stats &&
!ath11k_debugfs_get_fw_stats(ar, ar->pdev->pdev_id, 0,
WMI_REQUEST_RSSI_PER_CHAIN_STAT)) {
ath11k_mac_put_chain_rssi(sinfo, arsta, "fw stats", true);
}
signal = arsta->rssi_comb;
if (!signal &&
arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA &&
ar->ab->hw_params.supports_rssi_stats &&
!(ath11k_debugfs_get_fw_stats(ar, ar->pdev->pdev_id, 0,
WMI_REQUEST_VDEV_STAT)))
signal = arsta->rssi_beacon;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"sta statistics db2dbm %u rssi comb %d rssi beacon %d\n",
db2dbm, arsta->rssi_comb, arsta->rssi_beacon);
if (signal) {
sinfo->signal = db2dbm ? signal : signal + ATH11K_DEFAULT_NOISE_FLOOR;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi) +
ATH11K_DEFAULT_NOISE_FLOOR;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
}
#if IS_ENABLED(CONFIG_IPV6)
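/* Build the IPv6 solicited-node multicast address (ff02::1:ffXX:XXXX,
 * where XX:XXXX are the low 24 bits of the unicast/anycast address)
 * for each configured address, used for Neighbor Solicitation offload.
 */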
static void ath11k_generate_ns_mc_addr(struct ath11k *ar,
struct ath11k_arp_ns_offload *offload)
{
int i;
for (i = 0; i < offload->ipv6_count; i++) {
offload->self_ipv6_addr[i][0] = 0xff;
offload->self_ipv6_addr[i][1] = 0x02;
offload->self_ipv6_addr[i][11] = 0x01;
offload->self_ipv6_addr[i][12] = 0xff;
offload->self_ipv6_addr[i][13] =
offload->ipv6_addr[i][13];
offload->self_ipv6_addr[i][14] =
offload->ipv6_addr[i][14];
offload->self_ipv6_addr[i][15] =
offload->ipv6_addr[i][15];
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "NS solicited addr %pI6\n",
offload->self_ipv6_addr[i]);
}
}
static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct inet6_dev *idev)
{
struct ath11k *ar = hw->priv;
struct ath11k_arp_ns_offload *offload;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct inet6_ifaddr *ifa6;
struct ifacaddr6 *ifaca6;
struct list_head *p;
u32 count, scope;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "op ipv6 changed\n");
offload = &arvif->arp_ns_offload;
count = 0;
read_lock_bh(&idev->lock);
memset(offload->ipv6_addr, 0, sizeof(offload->ipv6_addr));
memset(offload->self_ipv6_addr, 0, sizeof(offload->self_ipv6_addr));
memcpy(offload->mac_addr, vif->addr, ETH_ALEN);
/* get unicast address */
list_for_each(p, &idev->addr_list) {
if (count >= ATH11K_IPV6_MAX_COUNT)
goto generate;
ifa6 = list_entry(p, struct inet6_ifaddr, if_list);
if (ifa6->flags & IFA_F_DADFAILED)
continue;
scope = ipv6_addr_src_scope(&ifa6->addr);
if (scope == IPV6_ADDR_SCOPE_LINKLOCAL ||
scope == IPV6_ADDR_SCOPE_GLOBAL) {
memcpy(offload->ipv6_addr[count], &ifa6->addr.s6_addr,
sizeof(ifa6->addr.s6_addr));
offload->ipv6_type[count] = ATH11K_IPV6_UC_TYPE;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "count %d ipv6 uc %pI6 scope %d\n",
count, offload->ipv6_addr[count],
scope);
count++;
} else {
ath11k_warn(ar->ab, "Unsupported ipv6 scope: %d\n", scope);
}
}
/* get anycast address */
for (ifaca6 = idev->ac_list; ifaca6; ifaca6 = ifaca6->aca_next) {
if (count >= ATH11K_IPV6_MAX_COUNT)
goto generate;
scope = ipv6_addr_src_scope(&ifaca6->aca_addr);
if (scope == IPV6_ADDR_SCOPE_LINKLOCAL ||
scope == IPV6_ADDR_SCOPE_GLOBAL) {
memcpy(offload->ipv6_addr[count], &ifaca6->aca_addr,
sizeof(ifaca6->aca_addr));
offload->ipv6_type[count] = ATH11K_IPV6_AC_TYPE;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "count %d ipv6 ac %pI6 scope %d\n",
count, offload->ipv6_addr[count],
scope);
count++;
} else {
ath11k_warn(ar->ab, "Unsupported ipv scope: %d\n", scope);
}
}
generate:
offload->ipv6_count = count;
read_unlock_bh(&idev->lock);
/* generate ns multicast address */
ath11k_generate_ns_mc_addr(ar, offload);
}
#endif
static void ath11k_mac_op_set_rekey_data(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_gtk_rekey_data *data)
{
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_rekey_data *rekey_data = &arvif->rekey_data;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "set rekey data vdev %d\n",
arvif->vdev_id);
mutex_lock(&ar->conf_mutex);
memcpy(rekey_data->kck, data->kck, NL80211_KCK_LEN);
memcpy(rekey_data->kek, data->kek, NL80211_KEK_LEN);
/* The supplicant provides the replay counter in big endian; the
 * firmware expects it in little endian.
 */
rekey_data->replay_ctr = get_unaligned_be64(data->replay_ctr);
arvif->rekey_data.enable_offload = true;
ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "kck", NULL,
rekey_data->kck, NL80211_KCK_LEN);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "kek", NULL,
rekey_data->kek, NL80211_KEK_LEN);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "replay ctr", NULL,
&rekey_data->replay_ctr, sizeof(rekey_data->replay_ctr));
mutex_unlock(&ar->conf_mutex);
}
static int ath11k_mac_op_set_bios_sar_specs(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar)
{
struct ath11k *ar = hw->priv;
const struct cfg80211_sar_sub_specs *sspec;
int ret, index;
u8 *sar_tbl;
u32 i;
if (!sar || sar->type != NL80211_SAR_TYPE_POWER ||
sar->num_sub_specs == 0)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (!test_bit(WMI_TLV_SERVICE_BIOS_SAR_SUPPORT, ar->ab->wmi_ab.svc_map) ||
!ar->ab->hw_params.bios_sar_capa) {
ret = -EOPNOTSUPP;
goto exit;
}
ret = ath11k_wmi_pdev_set_bios_geo_table_param(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to set geo table: %d\n", ret);
goto exit;
}
sar_tbl = kzalloc(BIOS_SAR_TABLE_LEN, GFP_KERNEL);
if (!sar_tbl) {
ret = -ENOMEM;
goto exit;
}
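/* The BIOS SAR table holds one power value per frequency range and per
 * chain: the first half of the table is chain 0, the second half is
 * chain 1.
 */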
sspec = sar->sub_specs;
for (i = 0; i < sar->num_sub_specs; i++) {
if (sspec->freq_range_index >= (BIOS_SAR_TABLE_LEN >> 1)) {
ath11k_warn(ar->ab, "Ignore bad frequency index %u, max allowed %u\n",
sspec->freq_range_index, BIOS_SAR_TABLE_LEN >> 1);
continue;
}
/* chain0 and chain1 share same power setting */
sar_tbl[sspec->freq_range_index] = sspec->power;
index = sspec->freq_range_index + (BIOS_SAR_TABLE_LEN >> 1);
sar_tbl[index] = sspec->power;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "sar tbl[%d] = %d\n",
sspec->freq_range_index, sar_tbl[sspec->freq_range_index]);
sspec++;
}
ret = ath11k_wmi_pdev_set_bios_sar_table_param(ar, sar_tbl);
if (ret)
ath11k_warn(ar->ab, "failed to set sar power: %d", ret);
kfree(sar_tbl);
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static int ath11k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath11k *ar = hw->priv;
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
ar->scan.roc_notify = false;
spin_unlock_bh(&ar->data_lock);
ath11k_scan_abort(ar);
mutex_unlock(&ar->conf_mutex);
cancel_delayed_work_sync(&ar->scan.timeout);
return 0;
}
static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel *chan,
int duration,
enum ieee80211_roc_type type)
{
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct scan_req_params arg;
int ret;
u32 scan_time_msec;
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
switch (ar->scan.state) {
case ATH11K_SCAN_IDLE:
reinit_completion(&ar->scan.started);
reinit_completion(&ar->scan.completed);
reinit_completion(&ar->scan.on_channel);
ar->scan.state = ATH11K_SCAN_STARTING;
ar->scan.is_roc = true;
ar->scan.vdev_id = arvif->vdev_id;
ar->scan.roc_freq = chan->center_freq;
ar->scan.roc_notify = true;
ret = 0;
break;
case ATH11K_SCAN_STARTING:
case ATH11K_SCAN_RUNNING:
case ATH11K_SCAN_ABORTING:
ret = -EBUSY;
break;
}
spin_unlock_bh(&ar->data_lock);
if (ret)
goto exit;
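/* Dwell for twice the advertised maximum RoC duration, presumably so
* the firmware scan comfortably outlasts any requested duration
* before mac80211 times out.
*/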
scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
memset(&arg, 0, sizeof(arg));
ath11k_wmi_start_scan_init(ar, &arg);
arg.num_chan = 1;
arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
GFP_KERNEL);
if (!arg.chan_list) {
ret = -ENOMEM;
goto exit;
}
arg.vdev_id = arvif->vdev_id;
arg.scan_id = ATH11K_SCAN_ID;
arg.chan_list[0] = chan->center_freq;
arg.dwell_time_active = scan_time_msec;
arg.dwell_time_passive = scan_time_msec;
arg.max_scan_time = scan_time_msec;
arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE;
arg.scan_flags |= WMI_SCAN_FILTER_PROBE_REQ;
arg.burst_duration = duration;
ret = ath11k_start_scan(ar, &arg);
if (ret) {
ath11k_warn(ar->ab, "failed to start roc scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
ar->scan.state = ATH11K_SCAN_IDLE;
spin_unlock_bh(&ar->data_lock);
goto free_chan_list;
}
ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
if (ret == 0) {
ath11k_warn(ar->ab, "failed to switch to channel for roc scan\n");
ret = ath11k_scan_stop(ar);
if (ret)
ath11k_warn(ar->ab, "failed to stop scan: %d\n", ret);
ret = -ETIMEDOUT;
goto free_chan_list;
}
ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
msecs_to_jiffies(duration));
ret = 0;
free_chan_list:
kfree(arg.chan_list);
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static int ath11k_fw_stats_request(struct ath11k *ar,
struct stats_request_params *req_param)
{
struct ath11k_base *ab = ar->ab;
unsigned long time_left;
int ret;
lockdep_assert_held(&ar->conf_mutex);
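/* Drop any stale pdev stats and rearm the completion before issuing
* the WMI request, then wait up to one second for the stats event
* handler to signal completion.
*/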
spin_lock_bh(&ar->data_lock);
ar->fw_stats_done = false;
ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
spin_unlock_bh(&ar->data_lock);
reinit_completion(&ar->fw_stats_complete);
ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
if (ret) {
ath11k_warn(ab, "could not request fw stats (%d)\n",
ret);
return ret;
}
time_left = wait_for_completion_timeout(&ar->fw_stats_complete,
1 * HZ);
if (!time_left)
return -ETIMEDOUT;
return 0;
}
static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
int *dbm)
{
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
struct stats_request_params req_param = {0};
struct ath11k_fw_stats_pdev *pdev;
int ret;
/* The final Tx power is the minimum of the target power, CTL power,
* regulatory power and PSD EIRP power. The host only knows the
* regulatory power from the obtained regulatory rules; the firmware
* knows all of these powers and applies the minimum. Hence request
* the FW pdev stats, in which the FW reports the minimum channel Tx
* power across all vdevs.
*/
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON)
goto err_fallback;
req_param.pdev_id = ar->pdev->pdev_id;
req_param.stats_id = WMI_REQUEST_PDEV_STAT;
ret = ath11k_fw_stats_request(ar, &req_param);
if (ret) {
ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
goto err_fallback;
}
spin_lock_bh(&ar->data_lock);
pdev = list_first_entry_or_null(&ar->fw_stats.pdevs,
struct ath11k_fw_stats_pdev, list);
if (!pdev) {
spin_unlock_bh(&ar->data_lock);
goto err_fallback;
}
/* The FW reports tx power in units of 0.5 dBm, hence divide by 2. */
*dbm = pdev->chan_tx_power / 2;
spin_unlock_bh(&ar->data_lock);
mutex_unlock(&ar->conf_mutex);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware %d, reported %d dBm\n",
pdev->chan_tx_power, *dbm);
return 0;
err_fallback:
mutex_unlock(&ar->conf_mutex);
/* We didn't get the txpower from the FW, so fall back to vif->bss_conf.txpower */
*dbm = vif->bss_conf.txpower;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware NaN, reported %d dBm\n",
*dbm);
return 0;
}
static const struct ieee80211_ops ath11k_ops = {
.tx = ath11k_mac_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = ath11k_mac_op_start,
.stop = ath11k_mac_op_stop,
.reconfig_complete = ath11k_mac_op_reconfig_complete,
.add_interface = ath11k_mac_op_add_interface,
.remove_interface = ath11k_mac_op_remove_interface,
.update_vif_offload = ath11k_mac_op_update_vif_offload,
.config = ath11k_mac_op_config,
.bss_info_changed = ath11k_mac_op_bss_info_changed,
.configure_filter = ath11k_mac_op_configure_filter,
.hw_scan = ath11k_mac_op_hw_scan,
.cancel_hw_scan = ath11k_mac_op_cancel_hw_scan,
.set_key = ath11k_mac_op_set_key,
.set_rekey_data = ath11k_mac_op_set_rekey_data,
.sta_state = ath11k_mac_op_sta_state,
.sta_set_4addr = ath11k_mac_op_sta_set_4addr,
.sta_set_txpwr = ath11k_mac_op_sta_set_txpwr,
.sta_rc_update = ath11k_mac_op_sta_rc_update,
.conf_tx = ath11k_mac_op_conf_tx,
.set_antenna = ath11k_mac_op_set_antenna,
.get_antenna = ath11k_mac_op_get_antenna,
.ampdu_action = ath11k_mac_op_ampdu_action,
.add_chanctx = ath11k_mac_op_add_chanctx,
.remove_chanctx = ath11k_mac_op_remove_chanctx,
.change_chanctx = ath11k_mac_op_change_chanctx,
.assign_vif_chanctx = ath11k_mac_op_assign_vif_chanctx,
.unassign_vif_chanctx = ath11k_mac_op_unassign_vif_chanctx,
.switch_vif_chanctx = ath11k_mac_op_switch_vif_chanctx,
.set_rts_threshold = ath11k_mac_op_set_rts_threshold,
.set_frag_threshold = ath11k_mac_op_set_frag_threshold,
.set_bitrate_mask = ath11k_mac_op_set_bitrate_mask,
.get_survey = ath11k_mac_op_get_survey,
.flush = ath11k_mac_op_flush,
.sta_statistics = ath11k_mac_op_sta_statistics,
CFG80211_TESTMODE_CMD(ath11k_tm_cmd)
#ifdef CONFIG_PM
.suspend = ath11k_wow_op_suspend,
.resume = ath11k_wow_op_resume,
.set_wakeup = ath11k_wow_op_set_wakeup,
#endif
#ifdef CONFIG_ATH11K_DEBUGFS
.sta_add_debugfs = ath11k_debugfs_sta_op_add,
#endif
#if IS_ENABLED(CONFIG_IPV6)
.ipv6_addr_change = ath11k_mac_op_ipv6_changed,
#endif
.get_txpower = ath11k_mac_op_get_txpower,
.set_sar_specs = ath11k_mac_op_set_bios_sar_specs,
.remain_on_channel = ath11k_mac_op_remain_on_channel,
.cancel_remain_on_channel = ath11k_mac_op_cancel_remain_on_channel,
};
static void ath11k_mac_update_ch_list(struct ath11k *ar,
struct ieee80211_supported_band *band,
u32 freq_low, u32 freq_high)
{
int i;
if (!(freq_low && freq_high))
return;
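/* Disable every channel whose center frequency falls outside the
* [freq_low, freq_high] range reported in the hal reg capabilities.
*/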
for (i = 0; i < band->n_channels; i++) {
if (band->channels[i].center_freq < freq_low ||
band->channels[i].center_freq > freq_high)
band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
}
}
static u32 ath11k_get_phy_id(struct ath11k *ar, u32 band)
{
struct ath11k_pdev *pdev = ar->pdev;
struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
if (band == WMI_HOST_WLAN_2G_CAP)
return pdev_cap->band[NL80211_BAND_2GHZ].phy_id;
if (band == WMI_HOST_WLAN_5G_CAP)
return pdev_cap->band[NL80211_BAND_5GHZ].phy_id;
ath11k_warn(ar->ab, "unsupported phy cap:%d\n", band);
return 0;
}
static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
u32 supported_bands)
{
struct ieee80211_supported_band *band;
struct ath11k_hal_reg_capabilities_ext *reg_cap, *temp_reg_cap;
void *channels;
u32 phy_id;
BUILD_BUG_ON((ARRAY_SIZE(ath11k_2ghz_channels) +
ARRAY_SIZE(ath11k_5ghz_channels) +
ARRAY_SIZE(ath11k_6ghz_channels)) !=
ATH11K_NUM_CHANS);
reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx];
temp_reg_cap = reg_cap;
if (supported_bands & WMI_HOST_WLAN_2G_CAP) {
channels = kmemdup(ath11k_2ghz_channels,
sizeof(ath11k_2ghz_channels),
GFP_KERNEL);
if (!channels)
return -ENOMEM;
band = &ar->mac.sbands[NL80211_BAND_2GHZ];
band->band = NL80211_BAND_2GHZ;
band->n_channels = ARRAY_SIZE(ath11k_2ghz_channels);
band->channels = channels;
band->n_bitrates = ath11k_g_rates_size;
band->bitrates = ath11k_g_rates;
ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
if (ar->ab->hw_params.single_pdev_only) {
phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP);
temp_reg_cap = &ar->ab->hal_reg_cap[phy_id];
}
ath11k_mac_update_ch_list(ar, band,
temp_reg_cap->low_2ghz_chan,
temp_reg_cap->high_2ghz_chan);
}
if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
if (reg_cap->high_5ghz_chan >= ATH11K_MIN_6G_FREQ) {
channels = kmemdup(ath11k_6ghz_channels,
sizeof(ath11k_6ghz_channels), GFP_KERNEL);
if (!channels) {
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
return -ENOMEM;
}
ar->supports_6ghz = true;
band = &ar->mac.sbands[NL80211_BAND_6GHZ];
band->band = NL80211_BAND_6GHZ;
band->n_channels = ARRAY_SIZE(ath11k_6ghz_channels);
band->channels = channels;
band->n_bitrates = ath11k_a_rates_size;
band->bitrates = ath11k_a_rates;
ar->hw->wiphy->bands[NL80211_BAND_6GHZ] = band;
if (ar->ab->hw_params.single_pdev_only) {
phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
temp_reg_cap = &ar->ab->hal_reg_cap[phy_id];
}
ath11k_mac_update_ch_list(ar, band,
temp_reg_cap->low_5ghz_chan,
temp_reg_cap->high_5ghz_chan);
}
if (reg_cap->low_5ghz_chan < ATH11K_MIN_6G_FREQ) {
channels = kmemdup(ath11k_5ghz_channels,
sizeof(ath11k_5ghz_channels),
GFP_KERNEL);
if (!channels) {
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
return -ENOMEM;
}
band = &ar->mac.sbands[NL80211_BAND_5GHZ];
band->band = NL80211_BAND_5GHZ;
band->n_channels = ARRAY_SIZE(ath11k_5ghz_channels);
band->channels = channels;
band->n_bitrates = ath11k_a_rates_size;
band->bitrates = ath11k_a_rates;
ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
if (ar->ab->hw_params.single_pdev_only) {
phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
temp_reg_cap = &ar->ab->hal_reg_cap[phy_id];
}
ath11k_mac_update_ch_list(ar, band,
temp_reg_cap->low_5ghz_chan,
temp_reg_cap->high_5ghz_chan);
}
}
return 0;
}
static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
struct ieee80211_iface_combination *combinations;
struct ieee80211_iface_limit *limits;
int n_limits;
combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
if (!combinations)
return -ENOMEM;
n_limits = 2;
limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL);
if (!limits) {
kfree(combinations);
return -ENOMEM;
}
limits[0].max = 1;
limits[0].types |= BIT(NL80211_IFTYPE_STATION);
limits[1].max = 16;
limits[1].types |= BIT(NL80211_IFTYPE_AP);
if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
combinations[0].limits = limits;
combinations[0].n_limits = n_limits;
combinations[0].max_interfaces = 16;
combinations[0].num_different_channels = 1;
combinations[0].beacon_int_infra_match = true;
combinations[0].beacon_int_min_gcd = 100;
combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80) |
BIT(NL80211_CHAN_WIDTH_80P80) |
BIT(NL80211_CHAN_WIDTH_160);
ar->hw->wiphy->iface_combinations = combinations;
ar->hw->wiphy->n_iface_combinations = 1;
return 0;
}
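/* Extended capabilities advertised per interface type; the array index
* is the byte offset within the Extended Capabilities element.
*/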
static const u8 ath11k_if_types_ext_capa[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
[2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
};
static const u8 ath11k_if_types_ext_capa_sta[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
[2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
[9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
};
static const u8 ath11k_if_types_ext_capa_ap[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
[2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
[9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT,
[10] = WLAN_EXT_CAPA11_EMA_SUPPORT,
};
static const struct wiphy_iftype_ext_capab ath11k_iftypes_ext_capa[] = {
{
.extended_capabilities = ath11k_if_types_ext_capa,
.extended_capabilities_mask = ath11k_if_types_ext_capa,
.extended_capabilities_len = sizeof(ath11k_if_types_ext_capa),
}, {
.iftype = NL80211_IFTYPE_STATION,
.extended_capabilities = ath11k_if_types_ext_capa_sta,
.extended_capabilities_mask = ath11k_if_types_ext_capa_sta,
.extended_capabilities_len =
sizeof(ath11k_if_types_ext_capa_sta),
}, {
.iftype = NL80211_IFTYPE_AP,
.extended_capabilities = ath11k_if_types_ext_capa_ap,
.extended_capabilities_mask = ath11k_if_types_ext_capa_ap,
.extended_capabilities_len =
sizeof(ath11k_if_types_ext_capa_ap),
},
};
static void __ath11k_mac_unregister(struct ath11k *ar)
{
cancel_work_sync(&ar->regd_update_work);
ieee80211_unregister_hw(ar->hw);
idr_for_each(&ar->txmgmt_idr, ath11k_mac_tx_mgmt_pending_free, ar);
idr_destroy(&ar->txmgmt_idr);
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
kfree(ar->hw->wiphy->iface_combinations[0].limits);
kfree(ar->hw->wiphy->iface_combinations);
SET_IEEE80211_DEV(ar->hw, NULL);
}
void ath11k_mac_unregister(struct ath11k_base *ab)
{
struct ath11k *ar;
struct ath11k_pdev *pdev;
int i;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (!ar)
continue;
__ath11k_mac_unregister(ar);
}
ath11k_peer_rhash_tbl_destroy(ab);
}
static int __ath11k_mac_register(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_pdev_cap *cap = &ar->pdev->cap;
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
WLAN_CIPHER_SUITE_AES_CMAC,
WLAN_CIPHER_SUITE_BIP_CMAC_256,
WLAN_CIPHER_SUITE_BIP_GMAC_128,
WLAN_CIPHER_SUITE_BIP_GMAC_256,
WLAN_CIPHER_SUITE_GCMP,
WLAN_CIPHER_SUITE_GCMP_256,
WLAN_CIPHER_SUITE_CCMP_256,
};
int ret;
u32 ht_cap = 0;
ath11k_pdev_caps_update(ar);
SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
SET_IEEE80211_DEV(ar->hw, ab->dev);
ret = ath11k_mac_setup_channels_rates(ar,
cap->supported_bands);
if (ret)
goto err;
ath11k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
ath11k_mac_setup_he_cap(ar, cap);
ret = ath11k_mac_setup_iface_combinations(ar);
if (ret) {
ath11k_err(ar->ab, "failed to setup interface combinations: %d\n", ret);
goto err_free_channels;
}
ar->hw->wiphy->available_antennas_rx = cap->rx_chain_mask;
ar->hw->wiphy->available_antennas_tx = cap->tx_chain_mask;
ar->hw->wiphy->interface_modes = ab->hw_params.interface_modes;
if (ab->hw_params.single_pdev_only && ar->supports_6ghz)
ieee80211_hw_set(ar->hw, SINGLE_SCAN_ON_ALL_BANDS);
if (ab->hw_params.supports_multi_bssid) {
ieee80211_hw_set(ar->hw, SUPPORTS_MULTI_BSSID);
ieee80211_hw_set(ar->hw, SUPPORTS_ONLY_HE_MULTI_BSSID);
}
ieee80211_hw_set(ar->hw, SIGNAL_DBM);
ieee80211_hw_set(ar->hw, SUPPORTS_PS);
ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
ieee80211_hw_set(ar->hw, MFP_CAPABLE);
ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
ieee80211_hw_set(ar->hw, AP_LINK_PS);
ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
if (ath11k_frame_mode == ATH11K_HW_TXRX_ETHERNET) {
ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD);
ieee80211_hw_set(ar->hw, SUPPORTS_RX_DECAP_OFFLOAD);
}
if (cap->nss_ratio_enabled)
ieee80211_hw_set(ar->hw, SUPPORTS_VHT_EXT_NSS_BW);
if ((ht_cap & WMI_HT_CAP_ENABLED) || ar->supports_6ghz) {
ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(ar->hw, SUPPORTS_REORDERING_BUFFER);
ieee80211_hw_set(ar->hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(ar->hw, USES_RSS);
}
ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
/* TODO: Check if the HT capability advertised by firmware differs
* between the bands of a dual band capable radio. It will be tricky
* to handle the case where the HT capability differs per band.
*/
if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS ||
(ar->supports_6ghz && ab->hw_params.supports_dynamic_smps_6ghz))
ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
ar->hw->max_listen_interval = ATH11K_MAX_HW_LISTEN_INTERVAL;
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
ar->hw->wiphy->max_remain_on_channel_duration = 5000;
ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_AP_SCAN;
ar->max_num_stations = TARGET_NUM_STATIONS(ab);
ar->max_num_peers = TARGET_NUM_PEERS_PDEV(ab);
ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
if (test_bit(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi->wmi_ab->svc_map)) {
ar->hw->wiphy->features |=
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
}
if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS;
ar->hw->wiphy->max_sched_scan_plan_interval =
WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
ar->hw->wiphy->max_sched_scan_plan_iterations =
WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
ar->hw->wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
}
ret = ath11k_wow_init(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to init wow: %d\n", ret);
goto err_free_if_combs;
}
if (test_bit(WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI,
ar->ab->wmi_ab.svc_map))
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);
ar->hw->queues = ATH11K_HW_MAX_QUEUES;
ar->hw->wiphy->tx_queue_len = ATH11K_QUEUE_LEN;
ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1;
ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
ar->hw->vif_data_size = sizeof(struct ath11k_vif);
ar->hw->sta_data_size = sizeof(struct ath11k_sta);
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
if (test_bit(WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD,
ar->ab->wmi_ab.svc_map)) {
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_BSS_COLOR);
ieee80211_hw_set(ar->hw, DETECTS_COLOR_COLLISION);
}
ar->hw->wiphy->cipher_suites = cipher_suites;
ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
ar->hw->wiphy->iftype_ext_capab = ath11k_iftypes_ext_capa;
ar->hw->wiphy->num_iftype_ext_capab =
ARRAY_SIZE(ath11k_iftypes_ext_capa);
if (ar->supports_6ghz) {
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_FILS_DISCOVERY);
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP);
}
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_SET_SCAN_DWELL);
if (test_bit(WMI_TLV_SERVICE_RTT, ar->ab->wmi_ab.svc_map))
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER);
ar->hw->wiphy->mbssid_max_interfaces = TARGET_NUM_VDEVS(ab);
ar->hw->wiphy->ema_max_profile_periodicity = TARGET_EMA_MAX_PROFILE_PERIOD;
ath11k_reg_init(ar);
if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
ar->hw->netdev_features = NETIF_F_HW_CSUM;
ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
}
if (test_bit(WMI_TLV_SERVICE_BIOS_SAR_SUPPORT, ar->ab->wmi_ab.svc_map) &&
ab->hw_params.bios_sar_capa)
ar->hw->wiphy->sar_capa = ab->hw_params.bios_sar_capa;
ret = ieee80211_register_hw(ar->hw);
if (ret) {
ath11k_err(ar->ab, "ieee80211 registration failed: %d\n", ret);
goto err_free_if_combs;
}
if (!ab->hw_params.supports_monitor)
/* There's a race between calling ieee80211_register_hw() and
* here, during which monitor mode is briefly enabled. But that
* window is so short that in practice it makes no difference.
*/
ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
/* Apply the regd received during initialization */
ret = ath11k_regd_update(ar);
if (ret) {
ath11k_err(ar->ab, "ath11k regd update failed: %d\n", ret);
goto err_unregister_hw;
}
if (ab->hw_params.current_cc_support && ab->new_alpha2[0]) {
struct wmi_set_current_country_params set_current_param = {};
memcpy(&set_current_param.alpha2, ab->new_alpha2, 2);
memcpy(&ar->alpha2, ab->new_alpha2, 2);
ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
if (ret)
ath11k_warn(ar->ab,
"failed set cc code for mac register: %d\n", ret);
}
ret = ath11k_debugfs_register(ar);
if (ret) {
ath11k_err(ar->ab, "debugfs registration failed: %d\n", ret);
goto err_unregister_hw;
}
return 0;
err_unregister_hw:
ieee80211_unregister_hw(ar->hw);
err_free_if_combs:
kfree(ar->hw->wiphy->iface_combinations[0].limits);
kfree(ar->hw->wiphy->iface_combinations);
err_free_channels:
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
err:
SET_IEEE80211_DEV(ar->hw, NULL);
return ret;
}
int ath11k_mac_register(struct ath11k_base *ab)
{
struct ath11k *ar;
struct ath11k_pdev *pdev;
int i;
int ret;
u8 mac_addr[ETH_ALEN] = {0};
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
/* Initialize channel counters frequency value in hertz */
ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;
ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1;
ret = ath11k_peer_rhash_tbl_init(ab);
if (ret)
return ret;
device_get_mac_address(ab->dev, mac_addr);
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (ab->pdevs_macaddr_valid) {
ether_addr_copy(ar->mac_addr, pdev->mac_addr);
} else {
if (is_zero_ether_addr(mac_addr))
ether_addr_copy(ar->mac_addr, ab->mac_addr);
else
ether_addr_copy(ar->mac_addr, mac_addr);
ar->mac_addr[4] += i;
}
idr_init(&ar->txmgmt_idr);
spin_lock_init(&ar->txmgmt_idr_lock);
ret = __ath11k_mac_register(ar);
if (ret)
goto err_cleanup;
init_waitqueue_head(&ar->txmgmt_empty_waitq);
}
return 0;
err_cleanup:
for (i = i - 1; i >= 0; i--) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
__ath11k_mac_unregister(ar);
}
ath11k_peer_rhash_tbl_destroy(ab);
return ret;
}
int ath11k_mac_allocate(struct ath11k_base *ab)
{
struct ieee80211_hw *hw;
struct ath11k *ar;
struct ath11k_pdev *pdev;
int ret;
int i;
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
hw = ieee80211_alloc_hw(sizeof(struct ath11k), &ath11k_ops);
if (!hw) {
ath11k_warn(ab, "failed to allocate mac80211 hw device\n");
ret = -ENOMEM;
goto err_free_mac;
}
ar = hw->priv;
ar->hw = hw;
ar->ab = ab;
ar->pdev = pdev;
ar->pdev_idx = i;
ar->lmac_id = ath11k_hw_get_mac_from_pdev_id(&ab->hw_params, i);
ar->wmi = &ab->wmi_ab.wmi[i];
/* FIXME wmi[0] is already initialized during attach,
* Should we do this again?
*/
ath11k_wmi_pdev_attach(ab, i);
ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask;
ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
ar->num_tx_chains = get_num_chains(pdev->cap.tx_chain_mask);
ar->num_rx_chains = get_num_chains(pdev->cap.rx_chain_mask);
pdev->ar = ar;
spin_lock_init(&ar->data_lock);
INIT_LIST_HEAD(&ar->arvifs);
INIT_LIST_HEAD(&ar->ppdu_stats_info);
mutex_init(&ar->conf_mutex);
init_completion(&ar->vdev_setup_done);
init_completion(&ar->vdev_delete_done);
init_completion(&ar->peer_assoc_done);
init_completion(&ar->peer_delete_done);
init_completion(&ar->install_key_done);
init_completion(&ar->bss_survey_done);
init_completion(&ar->scan.started);
init_completion(&ar->scan.completed);
init_completion(&ar->scan.on_channel);
init_completion(&ar->thermal.wmi_sync);
INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work);
INIT_WORK(&ar->regd_update_work, ath11k_regd_update_work);
INIT_WORK(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work);
skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
ar->monitor_vdev_id = -1;
clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
init_completion(&ar->completed_11d_scan);
ath11k_fw_stats_init(ar);
}
return 0;
err_free_mac:
ath11k_mac_destroy(ab);
return ret;
}
void ath11k_mac_destroy(struct ath11k_base *ab)
{
struct ath11k *ar;
struct ath11k_pdev *pdev;
int i;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (!ar)
continue;
ath11k_fw_stats_free(&ar->fw_stats);
ieee80211_free_hw(ar->hw);
pdev->ar = NULL;
}
}
int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif,
enum wmi_sta_keepalive_method method,
u32 interval)
{
struct ath11k *ar = arvif->ar;
struct wmi_sta_keepalive_arg arg = {};
int ret;
lockdep_assert_held(&ar->conf_mutex);
if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
return 0;
if (!test_bit(WMI_TLV_SERVICE_STA_KEEP_ALIVE, ar->ab->wmi_ab.svc_map))
return 0;
arg.vdev_id = arvif->vdev_id;
arg.enabled = 1;
arg.method = method;
arg.interval = interval;
ret = ath11k_wmi_sta_keepalive(ar, &arg);
if (ret) {
ath11k_warn(ar->ab, "failed to set keepalive on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
return 0;
}
|
linux-master
|
drivers/net/wireless/ath/ath11k/mac.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/of.h>
#include "pci.h"
#include "core.h"
#include "hif.h"
#include "mhi.h"
#include "debug.h"
#include "pcic.h"
#include "qmi.h"
#define ATH11K_PCI_BAR_NUM 0
#define ATH11K_PCI_DMA_MASK 32
#define TCSR_SOC_HW_VERSION 0x0224
#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8)
#define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0)
#define QCA6390_DEVICE_ID 0x1101
#define QCN9074_DEVICE_ID 0x1104
#define WCN6855_DEVICE_ID 0x1103
static const struct pci_device_id ath11k_pci_id_table[] = {
{ PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
{ PCI_VDEVICE(QCOM, WCN6855_DEVICE_ID) },
{ PCI_VDEVICE(QCOM, QCN9074_DEVICE_ID) },
{0}
};
MODULE_DEVICE_TABLE(pci, ath11k_pci_id_table);
static int ath11k_pci_bus_wake_up(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
}
static void ath11k_pci_bus_release(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
}
static u32 ath11k_pci_get_window_start(struct ath11k_base *ab, u32 offset)
{
if (!ab->hw_params.static_window_map)
return ATH11K_PCI_WINDOW_START;
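/* (offset ^ base) clears the bits the two addresses share, so
* comparing against the window range mask checks whether offset falls
* in the same window-sized block as the register base.
*/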
if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
/* if offset lies within DP register range, use 3rd window */
return 3 * ATH11K_PCI_WINDOW_START;
else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
ATH11K_PCI_WINDOW_RANGE_MASK)
/* if offset lies within CE register range, use 2nd window */
return 2 * ATH11K_PCI_WINDOW_START;
else
return ATH11K_PCI_WINDOW_START;
}
static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset)
{
struct ath11k_base *ab = ab_pci->ab;
u32 window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, offset);
lockdep_assert_held(&ab_pci->window_lock);
if (window != ab_pci->register_window) {
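/* The dummy read back below flushes the posted window-select write,
* so the new window is guaranteed to be in effect before the caller
* touches the windowed region.
*/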
iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window,
ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
ioread32(ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
ab_pci->register_window = window;
}
}
static void
ath11k_pci_window_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
u32 window_start;
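/* Only the first (dynamic) window requires programming the window
* register under window_lock; the static second and third windows map
* their register ranges directly.
*/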
window_start = ath11k_pci_get_window_start(ab, offset);
if (window_start == ATH11K_PCI_WINDOW_START) {
spin_lock_bh(&ab_pci->window_lock);
ath11k_pci_select_window(ab_pci, offset);
iowrite32(value, ab->mem + window_start +
(offset & ATH11K_PCI_WINDOW_RANGE_MASK));
spin_unlock_bh(&ab_pci->window_lock);
} else {
iowrite32(value, ab->mem + window_start +
(offset & ATH11K_PCI_WINDOW_RANGE_MASK));
}
}
static u32 ath11k_pci_window_read32(struct ath11k_base *ab, u32 offset)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
u32 window_start, val;
window_start = ath11k_pci_get_window_start(ab, offset);
if (window_start == ATH11K_PCI_WINDOW_START) {
spin_lock_bh(&ab_pci->window_lock);
ath11k_pci_select_window(ab_pci, offset);
val = ioread32(ab->mem + window_start +
(offset & ATH11K_PCI_WINDOW_RANGE_MASK));
spin_unlock_bh(&ab_pci->window_lock);
} else {
val = ioread32(ab->mem + window_start +
(offset & ATH11K_PCI_WINDOW_RANGE_MASK));
}
return val;
}
int ath11k_pci_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
{
struct pci_dev *pci_dev = to_pci_dev(ab->dev);
return pci_irq_vector(pci_dev, vector);
}
static const struct ath11k_pci_ops ath11k_pci_ops_qca6390 = {
.wakeup = ath11k_pci_bus_wake_up,
.release = ath11k_pci_bus_release,
.get_msi_irq = ath11k_pci_get_msi_irq,
.window_write32 = ath11k_pci_window_write32,
.window_read32 = ath11k_pci_window_read32,
};
static const struct ath11k_pci_ops ath11k_pci_ops_qcn9074 = {
.wakeup = NULL,
.release = NULL,
.get_msi_irq = ath11k_pci_get_msi_irq,
.window_write32 = ath11k_pci_window_write32,
.window_read32 = ath11k_pci_window_read32,
};
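/* Fallback MSI layout used when the platform grants only one vector:
* all users (MHI, CE, WAKE, DP) share vector 0.
*/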
static const struct ath11k_msi_config msi_config_one_msi = {
.total_vectors = 1,
.total_users = 4,
.users = (struct ath11k_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 1, .base_vector = 0 },
{ .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
{ .name = "DP", .num_vectors = 1, .base_vector = 0 },
},
};
static inline void ath11k_pci_select_static_window(struct ath11k_pci *ab_pci)
{
u32 umac_window;
u32 ce_window;
u32 window;
umac_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_SEQ_WCSS_UMAC_OFFSET);
ce_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_CE_WFSS_CE_REG_BASE);
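/* Pack the UMAC and CE window indices into what appear to be 6-bit
* fields of the window register; the lowest field is left as the
* default window.
*/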
window = (umac_window << 12) | (ce_window << 6);
iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window,
ab_pci->ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
}
static void ath11k_pci_soc_global_reset(struct ath11k_base *ab)
{
u32 val, delay;
val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET);
val |= PCIE_SOC_GLOBAL_RESET_V;
ath11k_pcic_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
/* TODO: exact time to sleep is uncertain */
delay = 10;
mdelay(delay);
/* Need to toggle V bit back otherwise stuck in reset status */
val &= ~PCIE_SOC_GLOBAL_RESET_V;
ath11k_pcic_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
mdelay(delay);
val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET);
if (val == 0xffffffff)
ath11k_warn(ab, "link down error during global reset\n");
}
static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab)
{
u32 val;
/* read cookie */
val = ath11k_pcic_read32(ab, PCIE_Q6_COOKIE_ADDR);
ath11k_dbg(ab, ATH11K_DBG_PCI, "pcie_q6_cookie_addr 0x%x\n", val);
val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY);
ath11k_dbg(ab, ATH11K_DBG_PCI, "wlaon_warm_sw_entry 0x%x\n", val);
/* TODO: exact time to sleep is uncertain */
mdelay(10);
/* Write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from continuing
* the warm path and entering a dead loop.
*/
ath11k_pcic_write32(ab, WLAON_WARM_SW_ENTRY, 0);
mdelay(10);
val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY);
ath11k_dbg(ab, ATH11K_DBG_PCI, "wlaon_warm_sw_entry 0x%x\n", val);
/* This is a read-to-clear register, so reading it clears it and
* prevents Q6 from entering the wrong code path.
*/
val = ath11k_pcic_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
ath11k_dbg(ab, ATH11K_DBG_PCI, "soc reset cause %d\n", val);
}
static int ath11k_pci_set_link_reg(struct ath11k_base *ab,
u32 offset, u32 value, u32 mask)
{
u32 v;
int i;
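/* Read-modify-write the masked field, then poll up to ten times,
* 2 ms apart, for the new value to take effect.
*/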
v = ath11k_pcic_read32(ab, offset);
if ((v & mask) == value)
return 0;
for (i = 0; i < 10; i++) {
ath11k_pcic_write32(ab, offset, (v & ~mask) | value);
v = ath11k_pcic_read32(ab, offset);
if ((v & mask) == value)
return 0;
mdelay(2);
}
ath11k_warn(ab, "failed to set pcie link register 0x%08x: 0x%08x != 0x%08x\n",
offset, v & mask, value);
return -ETIMEDOUT;
}
static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
{
int ret;
ret = ath11k_pci_set_link_reg(ab,
PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(ab),
PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL,
PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK);
if (ret) {
ath11k_warn(ab, "failed to set sysclk: %d\n", ret);
return ret;
}
ret = ath11k_pci_set_link_reg(ab,
PCIE_PCS_OSC_DTCT_CONFIG1_REG(ab),
PCIE_PCS_OSC_DTCT_CONFIG1_VAL,
PCIE_PCS_OSC_DTCT_CONFIG_MSK);
if (ret) {
ath11k_warn(ab, "failed to set dtct config1 error: %d\n", ret);
return ret;
}
ret = ath11k_pci_set_link_reg(ab,
PCIE_PCS_OSC_DTCT_CONFIG2_REG(ab),
PCIE_PCS_OSC_DTCT_CONFIG2_VAL,
PCIE_PCS_OSC_DTCT_CONFIG_MSK);
if (ret) {
ath11k_warn(ab, "failed to set dtct config2: %d\n", ret);
return ret;
}
ret = ath11k_pci_set_link_reg(ab,
PCIE_PCS_OSC_DTCT_CONFIG4_REG(ab),
PCIE_PCS_OSC_DTCT_CONFIG4_VAL,
PCIE_PCS_OSC_DTCT_CONFIG_MSK);
if (ret) {
ath11k_warn(ab, "failed to set dtct config4: %d\n", ret);
return ret;
}
return 0;
}
static void ath11k_pci_enable_ltssm(struct ath11k_base *ab)
{
u32 val;
int i;
val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM);
/* The PCIe link seems very unstable after the hot reset */
for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
if (val == 0xffffffff)
mdelay(5);
ath11k_pcic_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM);
}
ath11k_dbg(ab, ATH11K_DBG_PCI, "ltssm 0x%x\n", val);
val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST);
val |= GCC_GCC_PCIE_HOT_RST_VAL;
ath11k_pcic_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST);
ath11k_dbg(ab, ATH11K_DBG_PCI, "pcie_hot_rst 0x%x\n", val);
mdelay(5);
}
static void ath11k_pci_clear_all_intrs(struct ath11k_base *ab)
{
/* This is a workaround for a PCIe hot reset problem: the target
* latches an interrupt during hot reset, so when the SBL is
* downloaded again it unmasks interrupts, receives the stale one
* and crashes immediately.
*/
ath11k_pcic_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
}
static void ath11k_pci_set_wlaon_pwr_ctrl(struct ath11k_base *ab)
{
u32 val;
val = ath11k_pcic_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
ath11k_pcic_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
}
static void ath11k_pci_force_wake(struct ath11k_base *ab)
{
ath11k_pcic_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
mdelay(5);
}
static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on)
{
mdelay(100);
if (power_on) {
ath11k_pci_enable_ltssm(ab);
ath11k_pci_clear_all_intrs(ab);
ath11k_pci_set_wlaon_pwr_ctrl(ab);
if (ab->hw_params.fix_l1ss)
ath11k_pci_fix_l1ss(ab);
}
ath11k_mhi_clear_vector(ab);
ath11k_pci_clear_dbg_registers(ab);
ath11k_pci_soc_global_reset(ab);
ath11k_mhi_set_mhictrl_reset(ab);
}
static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
{
struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
cfg->tgt_ce = ab->hw_params.target_ce_config;
cfg->tgt_ce_len = ab->hw_params.target_ce_count;
cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id;
ath11k_ce_get_shadow_config(ab, &cfg->shadow_reg_v2,
&cfg->shadow_reg_v2_len);
}
static void ath11k_pci_msi_config(struct ath11k_pci *ab_pci, bool enable)
{
struct pci_dev *dev = ab_pci->pdev;
u16 control;
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
if (enable)
control |= PCI_MSI_FLAGS_ENABLE;
else
control &= ~PCI_MSI_FLAGS_ENABLE;
pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}
static void ath11k_pci_msi_enable(struct ath11k_pci *ab_pci)
{
ath11k_pci_msi_config(ab_pci, true);
}
static void ath11k_pci_msi_disable(struct ath11k_pci *ab_pci)
{
ath11k_pci_msi_config(ab_pci, false);
}
static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
const struct ath11k_msi_config *msi_config = ab->pci.msi.config;
struct pci_dev *pci_dev = ab_pci->pdev;
struct msi_desc *msi_desc;
int num_vectors;
int ret;
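/* Request the full vector set first; if the platform cannot provide
* it, fall back to a single shared vector and switch to the one-MSI
* configuration.
*/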
num_vectors = pci_alloc_irq_vectors(pci_dev,
msi_config->total_vectors,
msi_config->total_vectors,
PCI_IRQ_MSI);
if (num_vectors == msi_config->total_vectors) {
set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
} else {
num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
1,
1,
PCI_IRQ_MSI);
if (num_vectors < 0) {
ret = -EINVAL;
goto reset_msi_config;
}
clear_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
ab->pci.msi.config = &msi_config_one_msi;
ath11k_dbg(ab, ATH11K_DBG_PCI, "request one msi vector\n");
}
ath11k_info(ab, "MSI vectors: %d\n", num_vectors);
ath11k_pci_msi_disable(ab_pci);
msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
if (!msi_desc) {
ath11k_err(ab, "msi_desc is NULL!\n");
ret = -EINVAL;
goto free_msi_vector;
}
ab->pci.msi.ep_base_data = msi_desc->msg.data;
pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
&ab->pci.msi.addr_lo);
if (msi_desc->pci.msi_attrib.is_64) {
pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
&ab->pci.msi.addr_hi);
} else {
ab->pci.msi.addr_hi = 0;
}
ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab->pci.msi.ep_base_data);
return 0;
free_msi_vector:
pci_free_irq_vectors(ab_pci->pdev);
reset_msi_config:
return ret;
}
static void ath11k_pci_free_msi(struct ath11k_pci *ab_pci)
{
pci_free_irq_vectors(ab_pci->pdev);
}
static int ath11k_pci_config_msi_data(struct ath11k_pci *ab_pci)
{
struct msi_desc *msi_desc;
msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
if (!msi_desc) {
ath11k_err(ab_pci->ab, "msi_desc is NULL!\n");
pci_free_irq_vectors(ab_pci->pdev);
return -EINVAL;
}
ab_pci->ab->pci.msi.ep_base_data = msi_desc->msg.data;
ath11k_dbg(ab_pci->ab, ATH11K_DBG_PCI, "after request_irq msi_ep_base_data %d\n",
ab_pci->ab->pci.msi.ep_base_data);
return 0;
}
static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev)
{
struct ath11k_base *ab = ab_pci->ab;
u16 device_id;
int ret = 0;
pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
if (device_id != ab_pci->dev_id) {
ath11k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
device_id, ab_pci->dev_id);
ret = -EIO;
goto out;
}
ret = pci_assign_resource(pdev, ATH11K_PCI_BAR_NUM);
if (ret) {
ath11k_err(ab, "failed to assign pci resource: %d\n", ret);
goto out;
}
ret = pci_enable_device(pdev);
if (ret) {
ath11k_err(ab, "failed to enable pci device: %d\n", ret);
goto out;
}
ret = pci_request_region(pdev, ATH11K_PCI_BAR_NUM, "ath11k_pci");
if (ret) {
ath11k_err(ab, "failed to request pci region: %d\n", ret);
goto disable_device;
}
ret = dma_set_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
if (ret) {
ath11k_err(ab, "failed to set pci dma mask to %d: %d\n",
ATH11K_PCI_DMA_MASK, ret);
goto release_region;
}
pci_set_master(pdev);
ab->mem_len = pci_resource_len(pdev, ATH11K_PCI_BAR_NUM);
ab->mem = pci_iomap(pdev, ATH11K_PCI_BAR_NUM, 0);
if (!ab->mem) {
ath11k_err(ab, "failed to map pci bar %d\n", ATH11K_PCI_BAR_NUM);
ret = -EIO;
goto release_region;
}
ab->mem_ce = ab->mem;
ath11k_dbg(ab, ATH11K_DBG_BOOT, "pci_mem 0x%p\n", ab->mem);
return 0;
release_region:
pci_release_region(pdev, ATH11K_PCI_BAR_NUM);
disable_device:
pci_disable_device(pdev);
out:
return ret;
}
static void ath11k_pci_free_region(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
struct pci_dev *pci_dev = ab_pci->pdev;
pci_iounmap(pci_dev, ab->mem);
ab->mem = NULL;
pci_release_region(pci_dev, ATH11K_PCI_BAR_NUM);
if (pci_is_enabled(pci_dev))
pci_disable_device(pci_dev);
}
static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
&ab_pci->link_ctl);
ath11k_dbg(ab, ATH11K_DBG_PCI, "link_ctl 0x%04x L0s %d L1 %d\n",
ab_pci->link_ctl,
u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));
/* disable L0s and L1 */
pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_ASPMC);
set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags);
}
static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci)
{
if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags))
pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_ASPMC,
ab_pci->link_ctl &
PCI_EXP_LNKCTL_ASPMC);
}
static int ath11k_pci_power_up(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
int ret;
ab_pci->register_window = 0;
clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
ath11k_pci_sw_reset(ab_pci->ab, true);
/* Disable ASPM during firmware download due to problems switching
* to AMSS state.
*/
ath11k_pci_aspm_disable(ab_pci);
ath11k_pci_msi_enable(ab_pci);
ret = ath11k_mhi_start(ab_pci);
if (ret) {
ath11k_err(ab, "failed to start mhi: %d\n", ret);
return ret;
}
if (ab->hw_params.static_window_map)
ath11k_pci_select_static_window(ab_pci);
return 0;
}
static void ath11k_pci_power_down(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
/* restore aspm in case firmware bootup fails */
ath11k_pci_aspm_restore(ab_pci);
ath11k_pci_force_wake(ab_pci->ab);
ath11k_pci_msi_disable(ab_pci);
ath11k_mhi_stop(ab_pci);
clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
ath11k_pci_sw_reset(ab_pci->ab, false);
}
static int ath11k_pci_hif_suspend(struct ath11k_base *ab)
{
struct ath11k_pci *ar_pci = ath11k_pci_priv(ab);
return ath11k_mhi_suspend(ar_pci);
}
static int ath11k_pci_hif_resume(struct ath11k_base *ab)
{
struct ath11k_pci *ar_pci = ath11k_pci_priv(ab);
return ath11k_mhi_resume(ar_pci);
}
static void ath11k_pci_hif_ce_irq_enable(struct ath11k_base *ab)
{
ath11k_pcic_ce_irqs_enable(ab);
}
static void ath11k_pci_hif_ce_irq_disable(struct ath11k_base *ab)
{
ath11k_pcic_ce_irq_disable_sync(ab);
}
static int ath11k_pci_start(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
/* TODO: for now don't restore ASPM in case of single MSI
* vector as MHI register reading in M2 causes system hang.
*/
if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
ath11k_pci_aspm_restore(ab_pci);
else
ath11k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n");
ath11k_pcic_start(ab);
return 0;
}
static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
.start = ath11k_pci_start,
.stop = ath11k_pcic_stop,
.read32 = ath11k_pcic_read32,
.write32 = ath11k_pcic_write32,
.read = ath11k_pcic_read,
.power_down = ath11k_pci_power_down,
.power_up = ath11k_pci_power_up,
.suspend = ath11k_pci_hif_suspend,
.resume = ath11k_pci_hif_resume,
.irq_enable = ath11k_pcic_ext_irq_enable,
.irq_disable = ath11k_pcic_ext_irq_disable,
.get_msi_address = ath11k_pcic_get_msi_address,
.get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
.map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
.ce_irq_enable = ath11k_pci_hif_ce_irq_enable,
.ce_irq_disable = ath11k_pci_hif_ce_irq_disable,
.get_ce_msi_idx = ath11k_pcic_get_ce_msi_idx,
};
static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *minor)
{
u32 soc_hw_version;
soc_hw_version = ath11k_pcic_read32(ab, TCSR_SOC_HW_VERSION);
*major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
soc_hw_version);
*minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
soc_hw_version);
ath11k_dbg(ab, ATH11K_DBG_PCI, "tcsr_soc_hw_version major %d minor %d\n",
*major, *minor);
}
static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci,
const struct cpumask *m)
{
if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab_pci->ab->dev_flags))
return 0;
return irq_set_affinity_hint(ab_pci->pdev->irq, m);
}
static int ath11k_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_dev)
{
struct ath11k_base *ab;
struct ath11k_pci *ab_pci;
u32 soc_hw_version_major, soc_hw_version_minor, addr;
const struct ath11k_pci_ops *pci_ops;
int ret;
ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI);
if (!ab) {
dev_err(&pdev->dev, "failed to allocate ath11k base\n");
return -ENOMEM;
}
ab->dev = &pdev->dev;
pci_set_drvdata(pdev, ab);
ab_pci = ath11k_pci_priv(ab);
ab_pci->dev_id = pci_dev->device;
ab_pci->ab = ab;
ab_pci->pdev = pdev;
ab->hif.ops = &ath11k_pci_hif_ops;
ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
pci_set_drvdata(pdev, ab);
spin_lock_init(&ab_pci->window_lock);
/* Set fixed_mem_region to true for platforms that support reserved
* memory via DT. If memory is reserved in DT for the FW, the ath11k
* driver need not allocate it.
*/
ret = of_property_read_u32(ab->dev->of_node, "memory-region", &addr);
if (!ret)
set_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags);
ret = ath11k_pci_claim(ab_pci, pdev);
if (ret) {
ath11k_err(ab, "failed to claim device: %d\n", ret);
goto err_free_core;
}
ath11k_dbg(ab, ATH11K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device);
ab->id.vendor = pdev->vendor;
ab->id.device = pdev->device;
ab->id.subsystem_vendor = pdev->subsystem_vendor;
ab->id.subsystem_device = pdev->subsystem_device;
switch (pci_dev->device) {
case QCA6390_DEVICE_ID:
ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
switch (soc_hw_version_major) {
case 2:
ab->hw_rev = ATH11K_HW_QCA6390_HW20;
break;
default:
dev_err(&pdev->dev, "Unsupported QCA6390 SOC hardware version: %d %d\n",
soc_hw_version_major, soc_hw_version_minor);
ret = -EOPNOTSUPP;
goto err_pci_free_region;
}
pci_ops = &ath11k_pci_ops_qca6390;
break;
case QCN9074_DEVICE_ID:
pci_ops = &ath11k_pci_ops_qcn9074;
ab->hw_rev = ATH11K_HW_QCN9074_HW10;
break;
case WCN6855_DEVICE_ID:
ab->id.bdf_search = ATH11K_BDF_SEARCH_BUS_AND_BOARD;
ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
switch (soc_hw_version_major) {
case 2:
switch (soc_hw_version_minor) {
case 0x00:
case 0x01:
ab->hw_rev = ATH11K_HW_WCN6855_HW20;
break;
case 0x10:
case 0x11:
ab->hw_rev = ATH11K_HW_WCN6855_HW21;
break;
default:
goto unsupported_wcn6855_soc;
}
break;
default:
unsupported_wcn6855_soc:
dev_err(&pdev->dev, "Unsupported WCN6855 SOC hardware version: %d %d\n",
soc_hw_version_major, soc_hw_version_minor);
ret = -EOPNOTSUPP;
goto err_pci_free_region;
}
pci_ops = &ath11k_pci_ops_qca6390;
break;
default:
dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
pci_dev->device);
ret = -EOPNOTSUPP;
goto err_pci_free_region;
}
ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
if (ret) {
ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
goto err_pci_free_region;
}
ret = ath11k_pcic_init_msi_config(ab);
if (ret) {
ath11k_err(ab, "failed to init msi config: %d\n", ret);
goto err_pci_free_region;
}
ret = ath11k_pci_alloc_msi(ab_pci);
if (ret) {
ath11k_err(ab, "failed to enable msi: %d\n", ret);
goto err_pci_free_region;
}
ret = ath11k_core_pre_init(ab);
if (ret)
goto err_pci_disable_msi;
ret = ath11k_mhi_register(ab_pci);
if (ret) {
ath11k_err(ab, "failed to register mhi: %d\n", ret);
goto err_pci_disable_msi;
}
ret = ath11k_hal_srng_init(ab);
if (ret)
goto err_mhi_unregister;
ret = ath11k_ce_alloc_pipes(ab);
if (ret) {
ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
goto err_hal_srng_deinit;
}
ath11k_pci_init_qmi_ce_config(ab);
ret = ath11k_pcic_config_irq(ab);
if (ret) {
ath11k_err(ab, "failed to config irq: %d\n", ret);
goto err_ce_free;
}
ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
if (ret) {
ath11k_err(ab, "failed to set irq affinity %d\n", ret);
goto err_free_irq;
}
/* The kernel may allocate a dummy vector before request_irq and
* then allocate the real vector when request_irq is called. So
* fetch msi_data here again to avoid spurious interrupts, as the
* msi_data will be programmed into the srngs.
*/
ret = ath11k_pci_config_msi_data(ab_pci);
if (ret) {
ath11k_err(ab, "failed to config msi_data: %d\n", ret);
goto err_irq_affinity_cleanup;
}
ret = ath11k_core_init(ab);
if (ret) {
ath11k_err(ab, "failed to init core: %d\n", ret);
goto err_irq_affinity_cleanup;
}
ath11k_qmi_fwreset_from_cold_boot(ab);
return 0;
err_irq_affinity_cleanup:
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
err_free_irq:
ath11k_pcic_free_irq(ab);
err_ce_free:
ath11k_ce_free_pipes(ab);
err_hal_srng_deinit:
ath11k_hal_srng_deinit(ab);
err_mhi_unregister:
ath11k_mhi_unregister(ab_pci);
err_pci_disable_msi:
ath11k_pci_free_msi(ab_pci);
err_pci_free_region:
ath11k_pci_free_region(ab_pci);
err_free_core:
ath11k_core_free(ab);
return ret;
}
static void ath11k_pci_remove(struct pci_dev *pdev)
{
struct ath11k_base *ab = pci_get_drvdata(pdev);
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
ath11k_pci_power_down(ab);
ath11k_debugfs_soc_destroy(ab);
ath11k_qmi_deinit_service(ab);
goto qmi_fail;
}
set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
ath11k_core_deinit(ab);
qmi_fail:
ath11k_mhi_unregister(ab_pci);
ath11k_pcic_free_irq(ab);
ath11k_pci_free_msi(ab_pci);
ath11k_pci_free_region(ab_pci);
ath11k_hal_srng_deinit(ab);
ath11k_ce_free_pipes(ab);
ath11k_core_free(ab);
}
static void ath11k_pci_shutdown(struct pci_dev *pdev)
{
struct ath11k_base *ab = pci_get_drvdata(pdev);
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
ath11k_pci_power_down(ab);
}
static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
{
struct ath11k_base *ab = dev_get_drvdata(dev);
int ret;
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci suspend as qmi is not initialised\n");
return 0;
}
ret = ath11k_core_suspend(ab);
if (ret)
ath11k_warn(ab, "failed to suspend core: %d\n", ret);
return 0;
}
static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
{
struct ath11k_base *ab = dev_get_drvdata(dev);
int ret;
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci resume as qmi is not initialised\n");
return 0;
}
ret = ath11k_core_resume(ab);
if (ret)
ath11k_warn(ab, "failed to resume core: %d\n", ret);
return ret;
}
static SIMPLE_DEV_PM_OPS(ath11k_pci_pm_ops,
ath11k_pci_pm_suspend,
ath11k_pci_pm_resume);
static struct pci_driver ath11k_pci_driver = {
.name = "ath11k_pci",
.id_table = ath11k_pci_id_table,
.probe = ath11k_pci_probe,
.remove = ath11k_pci_remove,
.shutdown = ath11k_pci_shutdown,
#ifdef CONFIG_PM
.driver.pm = &ath11k_pci_pm_ops,
#endif
};
static int ath11k_pci_init(void)
{
int ret;
ret = pci_register_driver(&ath11k_pci_driver);
if (ret)
pr_err("failed to register ath11k pci driver: %d\n",
ret);
return ret;
}
module_init(ath11k_pci_init);
static void ath11k_pci_exit(void)
{
pci_unregister_driver(&ath11k_pci_driver);
}
module_exit(ath11k_pci_exit);
MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11ax WLAN devices");
MODULE_LICENSE("Dual BSD/GPL");
/* firmware files */
MODULE_FIRMWARE(ATH11K_FW_DIR "/QCA6390/hw2.0/*");
MODULE_FIRMWARE(ATH11K_FW_DIR "/QCN9074/hw1.0/*");
MODULE_FIRMWARE(ATH11K_FW_DIR "/WCN6855/hw2.0/*");
MODULE_FIRMWARE(ATH11K_FW_DIR "/WCN6855/hw2.1/*");
|
linux-master
|
drivers/net/wireless/ath/ath11k/pci.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "testmode.h"
#include <net/netlink.h>
#include "debug.h"
#include "wmi.h"
#include "hw.h"
#include "core.h"
#include "testmode_i.h"
#define ATH11K_FTM_SEGHDR_CURRENT_SEQ GENMASK(3, 0)
#define ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS GENMASK(7, 4)
static const struct nla_policy ath11k_tm_policy[ATH11K_TM_ATTR_MAX + 1] = {
[ATH11K_TM_ATTR_CMD] = { .type = NLA_U32 },
[ATH11K_TM_ATTR_DATA] = { .type = NLA_BINARY,
.len = ATH11K_TM_DATA_MAX_LEN },
[ATH11K_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
[ATH11K_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
[ATH11K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
};
static struct ath11k *ath11k_tm_get_ar(struct ath11k_base *ab)
{
struct ath11k_pdev *pdev;
struct ath11k *ar = NULL;
int i;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (ar && ar->state == ATH11K_STATE_FTM)
break;
}
return ar;
}
/* This function handles unsegmented events. The data of the various
* events is aggregated in the application layer, so from the host's
* perspective the event is unsegmented.
*/
static void ath11k_tm_wmi_event_unsegmented(struct ath11k_base *ab, u32 cmd_id,
struct sk_buff *skb)
{
struct sk_buff *nl_skb;
struct ath11k *ar;
ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
"event wmi cmd_id %d skb length %d\n",
cmd_id, skb->len);
ath11k_dbg_dump(ab, ATH11K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
ar = ath11k_tm_get_ar(ab);
if (!ar) {
ath11k_warn(ab, "testmode event not handled due to invalid pdev\n");
return;
}
spin_lock_bh(&ar->data_lock);
nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
2 * nla_total_size(sizeof(u32)) +
nla_total_size(skb->len),
GFP_ATOMIC);
if (!nl_skb) {
ath11k_warn(ab,
"failed to allocate skb for unsegmented testmode wmi event\n");
goto out;
}
if (nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_WMI) ||
nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id) ||
nla_put(nl_skb, ATH11K_TM_ATTR_DATA, skb->len, skb->data)) {
ath11k_warn(ab, "failed to populate testmode unsegmented event\n");
kfree_skb(nl_skb);
goto out;
}
cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
spin_unlock_bh(&ar->data_lock);
return;
out:
spin_unlock_bh(&ar->data_lock);
ath11k_warn(ab, "Failed to send testmode event to higher layers\n");
}
/* This function handles segmented events. Data from the various events
* received from firmware is aggregated and sent to the application layer.
*/
static int ath11k_tm_process_event(struct ath11k_base *ab, u32 cmd_id,
const struct wmi_ftm_event_msg *ftm_msg,
u16 length)
{
struct sk_buff *nl_skb;
int ret = 0;
struct ath11k *ar;
u8 const *buf_pos;
u16 datalen;
u8 total_segments, current_seq;
u32 data_pos;
u32 pdev_id;
ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
"event wmi cmd_id %d ftm event msg %pK datalen %d\n",
cmd_id, ftm_msg, length);
ath11k_dbg_dump(ab, ATH11K_DBG_TESTMODE, NULL, "", ftm_msg, length);
pdev_id = DP_HW2SW_MACID(ftm_msg->seg_hdr.pdev_id);
if (pdev_id >= ab->num_radios) {
ath11k_warn(ab, "testmode event not handled due to invalid pdev id: %d\n",
pdev_id);
return -EINVAL;
}
ar = ab->pdevs[pdev_id].ar;
if (!ar) {
ath11k_warn(ab, "testmode event not handled due to absence of pdev\n");
return -ENODEV;
}
current_seq = FIELD_GET(ATH11K_FTM_SEGHDR_CURRENT_SEQ,
ftm_msg->seg_hdr.segmentinfo);
total_segments = FIELD_GET(ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS,
ftm_msg->seg_hdr.segmentinfo);
datalen = length - (sizeof(struct wmi_ftm_seg_hdr));
buf_pos = ftm_msg->data;
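/* Segments are reassembled in order: sequence number 0 resets the
* reassembly buffer, and the aggregated event is pushed to user space
* once all total_segments chunks have been copied in.
*/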
spin_lock_bh(&ar->data_lock);
if (current_seq == 0) {
ab->testmode.expected_seq = 0;
ab->testmode.data_pos = 0;
}
data_pos = ab->testmode.data_pos;
if ((data_pos + datalen) > ATH11K_FTM_EVENT_MAX_BUF_LENGTH) {
ath11k_warn(ab, "Invalid ftm event length at %d: %d\n",
data_pos, datalen);
ret = -EINVAL;
goto out;
}
memcpy(&ab->testmode.eventdata[data_pos], buf_pos, datalen);
data_pos += datalen;
if (++ab->testmode.expected_seq != total_segments) {
ab->testmode.data_pos = data_pos;
ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
"partial data received current_seq %d total_seg %d\n",
current_seq, total_segments);
goto out;
}
ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
"total data length pos %d len %d\n",
data_pos, ftm_msg->seg_hdr.len);
nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
2 * nla_total_size(sizeof(u32)) +
nla_total_size(data_pos),
GFP_ATOMIC);
if (!nl_skb) {
ath11k_warn(ab,
"failed to allocate skb for segmented testmode wmi event\n");
ret = -ENOMEM;
goto out;
}
if (nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD,
ATH11K_TM_CMD_WMI_FTM) ||
nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id) ||
nla_put(nl_skb, ATH11K_TM_ATTR_DATA, data_pos,
&ab->testmode.eventdata[0])) {
ath11k_warn(ab, "failed to populate segmented testmode event");
kfree_skb(nl_skb);
ret = -ENOBUFS;
goto out;
}
cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
out:
spin_unlock_bh(&ar->data_lock);
return ret;
}
static void ath11k_tm_wmi_event_segmented(struct ath11k_base *ab, u32 cmd_id,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_ftm_event_msg *ev;
u16 length;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
return;
}
ev = tb[WMI_TAG_ARRAY_BYTE];
if (!ev) {
ath11k_warn(ab, "failed to fetch ftm msg\n");
kfree(tb);
return;
}
length = skb->len - TLV_HDR_SIZE;
ret = ath11k_tm_process_event(ab, cmd_id, ev, length);
if (ret)
ath11k_warn(ab, "Failed to process ftm event\n");
kfree(tb);
}
void ath11k_tm_wmi_event(struct ath11k_base *ab, u32 cmd_id, struct sk_buff *skb)
{
if (test_bit(ATH11K_FLAG_FTM_SEGMENTED, &ab->dev_flags))
ath11k_tm_wmi_event_segmented(ab, cmd_id, skb);
else
ath11k_tm_wmi_event_unsegmented(ab, cmd_id, skb);
}
static int ath11k_tm_cmd_get_version(struct ath11k *ar, struct nlattr *tb[])
{
struct sk_buff *skb;
int ret;
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
"cmd get version_major %d version_minor %d\n",
ATH11K_TESTMODE_VERSION_MAJOR,
ATH11K_TESTMODE_VERSION_MINOR);
skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
nla_total_size(sizeof(u32)));
if (!skb)
return -ENOMEM;
ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MAJOR,
ATH11K_TESTMODE_VERSION_MAJOR);
if (ret) {
kfree_skb(skb);
return ret;
}
ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MINOR,
ATH11K_TESTMODE_VERSION_MINOR);
if (ret) {
kfree_skb(skb);
return ret;
}
return cfg80211_testmode_reply(skb);
}
static int ath11k_tm_cmd_testmode_start(struct ath11k *ar, struct nlattr *tb[])
{
int ret;
mutex_lock(&ar->conf_mutex);
if (ar->state == ATH11K_STATE_FTM) {
ret = -EALREADY;
goto err;
}
/* start UTF only when the driver is not in use */
if (ar->state != ATH11K_STATE_OFF) {
ret = -EBUSY;
goto err;
}
ar->ab->testmode.eventdata = kzalloc(ATH11K_FTM_EVENT_MAX_BUF_LENGTH,
GFP_KERNEL);
if (!ar->ab->testmode.eventdata) {
ret = -ENOMEM;
goto err;
}
ar->state = ATH11K_STATE_FTM;
ar->ftm_msgref = 0;
mutex_unlock(&ar->conf_mutex);
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE, "cmd start\n");
return 0;
err:
mutex_unlock(&ar->conf_mutex);
return ret;
}
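/* Forward an arbitrary WMI command from userspace to the firmware.
 * The payload must start with a TLV header; for pdev/vdev scoped
 * tags the id word following the header is patched with the real
 * pdev or vdev id before the buffer is sent out.
 */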
static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[],
struct ieee80211_vif *vif)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct sk_buff *skb;
struct ath11k_vif *arvif;
u32 cmd_id, buf_len;
int ret, tag;
void *buf;
u32 *ptr;
mutex_lock(&ar->conf_mutex);
if (!tb[ATH11K_TM_ATTR_DATA]) {
ret = -EINVAL;
goto out;
}
if (!tb[ATH11K_TM_ATTR_WMI_CMDID]) {
ret = -EINVAL;
goto out;
}
buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
if (!buf_len) {
ath11k_warn(ar->ab, "No data present in testmode wmi command\n");
ret = -EINVAL;
goto out;
}
cmd_id = nla_get_u32(tb[ATH11K_TM_ATTR_WMI_CMDID]);
/* Make sure that the buffer length is long enough to
 * hold the TLV header and the pdev/vdev id.
 */
if (buf_len < sizeof(struct wmi_tlv) + sizeof(u32)) {
ret = -EINVAL;
goto out;
}
ptr = buf;
tag = FIELD_GET(WMI_TLV_TAG, *ptr);
/* pdev/vdev id starts right after the TLV header */
ptr++;
if (tag == WMI_TAG_PDEV_SET_PARAM_CMD)
*ptr = ar->pdev->pdev_id;
if (ar->ab->fw_mode != ATH11K_FIRMWARE_MODE_FTM &&
(tag == WMI_TAG_VDEV_SET_PARAM_CMD || tag == WMI_TAG_UNIT_TEST_CMD)) {
if (vif) {
arvif = ath11k_vif_to_arvif(vif);
*ptr = arvif->vdev_id;
} else {
ret = -EINVAL;
goto out;
}
}
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
"cmd wmi cmd_id %d buf length %d\n",
cmd_id, buf_len);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", buf, buf_len);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
if (!skb) {
ret = -ENOMEM;
goto out;
}
memcpy(skb->data, buf, buf_len);
ret = ath11k_wmi_cmd_send(wmi, skb, cmd_id);
if (ret) {
dev_kfree_skb(skb);
ath11k_warn(ar->ab, "failed to transmit wmi command (testmode): %d\n",
ret);
goto out;
}
ret = 0;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
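/* Send an FTM command, splitting the user buffer into chunks of at
 * most MAX_WMI_UTF_LEN bytes. Each chunk is prefixed with a
 * wmi_ftm_seg_hdr carrying the total length, a per-command msgref and
 * the packed total/current segment numbers so that the firmware can
 * reassemble the stream. As an illustration, a buffer slightly larger
 * than 2 * MAX_WMI_UTF_LEN goes out as three segments numbered 0, 1
 * and 2, all sharing the same msgref.
 */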
static int ath11k_tm_cmd_wmi_ftm(struct ath11k *ar, struct nlattr *tb[])
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = ar->ab;
struct sk_buff *skb;
u32 cmd_id, buf_len, hdr_info;
int ret;
void *buf;
u8 segnumber = 0, seginfo;
u16 chunk_len, total_bytes, num_segments;
u8 *bufpos;
struct wmi_ftm_cmd *ftm_cmd;
set_bit(ATH11K_FLAG_FTM_SEGMENTED, &ab->dev_flags);
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_FTM) {
ret = -ENETDOWN;
goto out;
}
if (!tb[ATH11K_TM_ATTR_DATA]) {
ret = -EINVAL;
goto out;
}
buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
cmd_id = WMI_PDEV_UTF_CMDID;
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
"cmd wmi ftm cmd_id %d buffer length %d\n",
cmd_id, buf_len);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", buf, buf_len);
bufpos = buf;
total_bytes = buf_len;
num_segments = total_bytes / MAX_WMI_UTF_LEN;
if (buf_len - (num_segments * MAX_WMI_UTF_LEN))
num_segments++;
while (buf_len) {
chunk_len = min_t(u16, buf_len, MAX_WMI_UTF_LEN);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, (chunk_len +
sizeof(struct wmi_ftm_cmd)));
if (!skb) {
ret = -ENOMEM;
goto out;
}
ftm_cmd = (struct wmi_ftm_cmd *)skb->data;
hdr_info = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
FIELD_PREP(WMI_TLV_LEN, (chunk_len +
sizeof(struct wmi_ftm_seg_hdr)));
ftm_cmd->tlv_header = hdr_info;
ftm_cmd->seg_hdr.len = total_bytes;
ftm_cmd->seg_hdr.msgref = ar->ftm_msgref;
seginfo = FIELD_PREP(ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS, num_segments) |
FIELD_PREP(ATH11K_FTM_SEGHDR_CURRENT_SEQ, segnumber);
ftm_cmd->seg_hdr.segmentinfo = seginfo;
segnumber++;
memcpy(&ftm_cmd->data, bufpos, chunk_len);
ret = ath11k_wmi_cmd_send(wmi, skb, cmd_id);
if (ret) {
ath11k_warn(ar->ab, "failed to send wmi ftm command: %d\n", ret);
goto out;
}
buf_len -= chunk_len;
bufpos += chunk_len;
}
ar->ftm_msgref++;
ret = 0;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len)
{
struct ath11k *ar = hw->priv;
struct nlattr *tb[ATH11K_TM_ATTR_MAX + 1];
int ret;
ret = nla_parse(tb, ATH11K_TM_ATTR_MAX, data, len, ath11k_tm_policy,
NULL);
if (ret)
return ret;
if (!tb[ATH11K_TM_ATTR_CMD])
return -EINVAL;
switch (nla_get_u32(tb[ATH11K_TM_ATTR_CMD])) {
case ATH11K_TM_CMD_GET_VERSION:
return ath11k_tm_cmd_get_version(ar, tb);
case ATH11K_TM_CMD_WMI:
return ath11k_tm_cmd_wmi(ar, tb, vif);
case ATH11K_TM_CMD_TESTMODE_START:
return ath11k_tm_cmd_testmode_start(ar, tb);
case ATH11K_TM_CMD_WMI_FTM:
return ath11k_tm_cmd_wmi_ftm(ar, tb);
default:
return -EOPNOTSUPP;
}
}
|
linux-master
|
drivers/net/wireless/ath/ath11k/testmode.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
*/
#include <linux/relay.h>
#include "core.h"
#include "debug.h"
#define ATH11K_SPECTRAL_NUM_RESP_PER_EVENT 2
#define ATH11K_SPECTRAL_EVENT_TIMEOUT_MS 1
#define ATH11K_SPECTRAL_DWORD_SIZE 4
#define ATH11K_SPECTRAL_MIN_BINS 32
#define ATH11K_SPECTRAL_MIN_IB_BINS (ATH11K_SPECTRAL_MIN_BINS >> 1)
#define ATH11K_SPECTRAL_MAX_IB_BINS(x) ((x)->hw_params.spectral.max_fft_bins >> 1)
#define ATH11K_SPECTRAL_SCAN_COUNT_MAX 4095
/* Max channels, computed as the sum of 2 GHz and 5 GHz band channels */
#define ATH11K_SPECTRAL_TOTAL_CHANNEL 41
#define ATH11K_SPECTRAL_SAMPLES_PER_CHANNEL 70
#define ATH11K_SPECTRAL_PER_SAMPLE_SIZE(x) (sizeof(struct fft_sample_ath11k) + \
ATH11K_SPECTRAL_MAX_IB_BINS(x))
#define ATH11K_SPECTRAL_TOTAL_SAMPLE (ATH11K_SPECTRAL_TOTAL_CHANNEL * \
ATH11K_SPECTRAL_SAMPLES_PER_CHANNEL)
#define ATH11K_SPECTRAL_SUB_BUFF_SIZE(x) ATH11K_SPECTRAL_PER_SAMPLE_SIZE(x)
#define ATH11K_SPECTRAL_NUM_SUB_BUF ATH11K_SPECTRAL_TOTAL_SAMPLE
#define ATH11K_SPECTRAL_20MHZ 20
#define ATH11K_SPECTRAL_40MHZ 40
#define ATH11K_SPECTRAL_80MHZ 80
#define ATH11K_SPECTRAL_160MHZ 160
#define ATH11K_SPECTRAL_SIGNATURE 0xFA
#define ATH11K_SPECTRAL_TAG_RADAR_SUMMARY 0x0
#define ATH11K_SPECTRAL_TAG_RADAR_FFT 0x1
#define ATH11K_SPECTRAL_TAG_SCAN_SUMMARY 0x2
#define ATH11K_SPECTRAL_TAG_SCAN_SEARCH 0x3
#define SPECTRAL_TLV_HDR_LEN GENMASK(15, 0)
#define SPECTRAL_TLV_HDR_TAG GENMASK(23, 16)
#define SPECTRAL_TLV_HDR_SIGN GENMASK(31, 24)
#define SPECTRAL_SUMMARY_INFO0_AGC_TOTAL_GAIN GENMASK(7, 0)
#define SPECTRAL_SUMMARY_INFO0_OB_FLAG BIT(8)
#define SPECTRAL_SUMMARY_INFO0_GRP_IDX GENMASK(16, 9)
#define SPECTRAL_SUMMARY_INFO0_RECENT_RFSAT BIT(17)
#define SPECTRAL_SUMMARY_INFO0_INBAND_PWR_DB GENMASK(27, 18)
#define SPECTRAL_SUMMARY_INFO0_FALSE_SCAN BIT(28)
#define SPECTRAL_SUMMARY_INFO0_DETECTOR_ID GENMASK(30, 29)
#define SPECTRAL_SUMMARY_INFO0_PRI80 BIT(31)
#define SPECTRAL_SUMMARY_INFO2_PEAK_SIGNED_IDX GENMASK(11, 0)
#define SPECTRAL_SUMMARY_INFO2_PEAK_MAGNITUDE GENMASK(21, 12)
#define SPECTRAL_SUMMARY_INFO2_NARROWBAND_MASK GENMASK(29, 22)
#define SPECTRAL_SUMMARY_INFO2_GAIN_CHANGE BIT(30)
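/* A spectral report is a stream of TLVs. The 32-bit TLV header packs
 * the payload length in DWORDs (bits 15:0), the tag (bits 23:16) and
 * the signature 0xFA (bits 31:24); FIELD_GET() with the GENMASK
 * definitions above is used to extract each field.
 */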
struct spectral_tlv {
__le32 timestamp;
__le32 header;
} __packed;
struct spectral_summary_fft_report {
__le32 timestamp;
__le32 tlv_header;
__le32 info0;
__le32 reserve0;
__le32 info2;
__le32 reserve1;
} __packed;
struct ath11k_spectral_summary_report {
struct wmi_dma_buf_release_meta_data meta;
u32 timestamp;
u8 agc_total_gain;
u8 grp_idx;
u16 inb_pwr_db;
s16 peak_idx;
u16 peak_mag;
u8 detector_id;
bool out_of_band_flag;
bool rf_saturation;
bool primary80;
bool gain_change;
bool false_scan;
};
#define SPECTRAL_FFT_REPORT_INFO0_DETECTOR_ID GENMASK(1, 0)
#define SPECTRAL_FFT_REPORT_INFO0_FFT_NUM GENMASK(4, 2)
#define SPECTRAL_FFT_REPORT_INFO0_RADAR_CHECK GENMASK(16, 5)
#define SPECTRAL_FFT_REPORT_INFO0_PEAK_SIGNED_IDX GENMASK(27, 17)
#define SPECTRAL_FFT_REPORT_INFO0_CHAIN_IDX GENMASK(30, 28)
#define SPECTRAL_FFT_REPORT_INFO1_BASE_PWR_DB GENMASK(8, 0)
#define SPECTRAL_FFT_REPORT_INFO1_TOTAL_GAIN_DB GENMASK(16, 9)
#define SPECTRAL_FFT_REPORT_INFO2_NUM_STRONG_BINS GENMASK(7, 0)
#define SPECTRAL_FFT_REPORT_INFO2_PEAK_MAGNITUDE GENMASK(17, 8)
#define SPECTRAL_FFT_REPORT_INFO2_AVG_PWR_DB GENMASK(24, 18)
#define SPECTRAL_FFT_REPORT_INFO2_REL_PWR_DB GENMASK(31, 25)
struct spectral_search_fft_report {
__le32 timestamp;
__le32 tlv_header;
__le32 info0;
__le32 info1;
__le32 info2;
__le32 reserve0;
u8 bins[];
} __packed;
struct ath11k_spectral_search_report {
u32 timestamp;
u8 detector_id;
u8 fft_count;
u16 radar_check;
s16 peak_idx;
u8 chain_idx;
u16 base_pwr_db;
u8 total_gain_db;
u8 strong_bin_count;
u16 peak_mag;
u8 avg_pwr_db;
u8 rel_pwr_db;
};
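/* Spectral samples are streamed to userspace through a relayfs
 * channel ("spectral_scan" under the pdev debugfs directory); the
 * callbacks below simply back the relay channel with a debugfs file.
 */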
static struct dentry *create_buf_file_handler(const char *filename,
struct dentry *parent,
umode_t mode,
struct rchan_buf *buf,
int *is_global)
{
struct dentry *buf_file;
buf_file = debugfs_create_file(filename, mode, parent, buf,
&relay_file_operations);
*is_global = 1;
return buf_file;
}
static int remove_buf_file_handler(struct dentry *dentry)
{
debugfs_remove(dentry);
return 0;
}
static const struct rchan_callbacks rfs_scan_cb = {
.create_buf_file = create_buf_file_handler,
.remove_buf_file = remove_buf_file_handler,
};
static struct ath11k_vif *ath11k_spectral_get_vdev(struct ath11k *ar)
{
struct ath11k_vif *arvif;
lockdep_assert_held(&ar->conf_mutex);
if (list_empty(&ar->arvifs))
return NULL;
/* if there already is a vif doing spectral, return that. */
list_for_each_entry(arvif, &ar->arvifs, list)
if (arvif->spectral_enabled)
return arvif;
/* otherwise, return the first vif. */
return list_first_entry(&ar->arvifs, typeof(*arvif), list);
}
static int ath11k_spectral_scan_trigger(struct ath11k *ar)
{
struct ath11k_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
arvif = ath11k_spectral_get_vdev(ar);
if (!arvif)
return -ENODEV;
if (ar->spectral.mode == ATH11K_SPECTRAL_DISABLED)
return 0;
ar->spectral.is_primary = true;
ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id,
ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE);
if (ret)
return ret;
ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id,
ATH11K_WMI_SPECTRAL_TRIGGER_CMD_TRIGGER,
ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE);
if (ret)
return ret;
return 0;
}
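/* (Re)configure the spectral scan for the given mode. The scan is
 * always disabled first so that possibly changed parameters (count,
 * FFT size) take effect cleanly; the WMI configuration is then pushed
 * unless the requested mode is ATH11K_SPECTRAL_DISABLED.
 */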
static int ath11k_spectral_scan_config(struct ath11k *ar,
enum ath11k_spectral_mode mode)
{
struct ath11k_wmi_vdev_spectral_conf_param param = { 0 };
struct ath11k_vif *arvif;
int ret, count;
lockdep_assert_held(&ar->conf_mutex);
arvif = ath11k_spectral_get_vdev(ar);
if (!arvif)
return -ENODEV;
arvif->spectral_enabled = (mode != ATH11K_SPECTRAL_DISABLED);
spin_lock_bh(&ar->spectral.lock);
ar->spectral.mode = mode;
spin_unlock_bh(&ar->spectral.lock);
ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id,
ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
ATH11K_WMI_SPECTRAL_ENABLE_CMD_DISABLE);
if (ret) {
ath11k_warn(ar->ab, "failed to enable spectral scan: %d\n", ret);
return ret;
}
if (mode == ATH11K_SPECTRAL_DISABLED)
return 0;
if (mode == ATH11K_SPECTRAL_BACKGROUND)
count = ATH11K_WMI_SPECTRAL_COUNT_DEFAULT;
else
count = max_t(u16, 1, ar->spectral.count);
param.vdev_id = arvif->vdev_id;
param.scan_count = count;
param.scan_fft_size = ar->spectral.fft_size;
param.scan_period = ATH11K_WMI_SPECTRAL_PERIOD_DEFAULT;
param.scan_priority = ATH11K_WMI_SPECTRAL_PRIORITY_DEFAULT;
param.scan_gc_ena = ATH11K_WMI_SPECTRAL_GC_ENA_DEFAULT;
param.scan_restart_ena = ATH11K_WMI_SPECTRAL_RESTART_ENA_DEFAULT;
param.scan_noise_floor_ref = ATH11K_WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
param.scan_init_delay = ATH11K_WMI_SPECTRAL_INIT_DELAY_DEFAULT;
param.scan_nb_tone_thr = ATH11K_WMI_SPECTRAL_NB_TONE_THR_DEFAULT;
param.scan_str_bin_thr = ATH11K_WMI_SPECTRAL_STR_BIN_THR_DEFAULT;
param.scan_wb_rpt_mode = ATH11K_WMI_SPECTRAL_WB_RPT_MODE_DEFAULT;
param.scan_rssi_rpt_mode = ATH11K_WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT;
param.scan_rssi_thr = ATH11K_WMI_SPECTRAL_RSSI_THR_DEFAULT;
param.scan_pwr_format = ATH11K_WMI_SPECTRAL_PWR_FORMAT_DEFAULT;
param.scan_rpt_mode = ATH11K_WMI_SPECTRAL_RPT_MODE_DEFAULT;
param.scan_bin_scale = ATH11K_WMI_SPECTRAL_BIN_SCALE_DEFAULT;
param.scan_dbm_adj = ATH11K_WMI_SPECTRAL_DBM_ADJ_DEFAULT;
param.scan_chn_mask = ATH11K_WMI_SPECTRAL_CHN_MASK_DEFAULT;
ret = ath11k_wmi_vdev_spectral_conf(ar, &param);
if (ret) {
ath11k_warn(ar->ab, "failed to configure spectral scan: %d\n", ret);
return ret;
}
return 0;
}
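/* debugfs control interface: writing "background", "manual",
 * "trigger" or "disable" to spectral_scan_ctl drives the scan state
 * machine, e.g. (illustrative path):
 *   echo background > .../ath11k/<hw>/macX/spectral_scan_ctl
 *   echo trigger    > .../ath11k/<hw>/macX/spectral_scan_ctl
 */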
static ssize_t ath11k_read_file_spec_scan_ctl(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
char *mode = "";
size_t len;
enum ath11k_spectral_mode spectral_mode;
mutex_lock(&ar->conf_mutex);
spectral_mode = ar->spectral.mode;
mutex_unlock(&ar->conf_mutex);
switch (spectral_mode) {
case ATH11K_SPECTRAL_DISABLED:
mode = "disable";
break;
case ATH11K_SPECTRAL_BACKGROUND:
mode = "background";
break;
case ATH11K_SPECTRAL_MANUAL:
mode = "manual";
break;
}
len = strlen(mode);
return simple_read_from_buffer(user_buf, count, ppos, mode, len);
}
static ssize_t ath11k_write_file_spec_scan_ctl(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
char buf[32];
ssize_t len;
int ret;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
mutex_lock(&ar->conf_mutex);
if (strncmp("trigger", buf, 7) == 0) {
if (ar->spectral.mode == ATH11K_SPECTRAL_MANUAL ||
ar->spectral.mode == ATH11K_SPECTRAL_BACKGROUND) {
/* reset the configuration to adopt possibly changed
* debugfs parameters
*/
ret = ath11k_spectral_scan_config(ar, ar->spectral.mode);
if (ret) {
ath11k_warn(ar->ab, "failed to reconfigure spectral scan: %d\n",
ret);
goto unlock;
}
ret = ath11k_spectral_scan_trigger(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to trigger spectral scan: %d\n",
ret);
}
} else {
ret = -EINVAL;
}
} else if (strncmp("background", buf, 10) == 0) {
ret = ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_BACKGROUND);
} else if (strncmp("manual", buf, 6) == 0) {
ret = ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_MANUAL);
} else if (strncmp("disable", buf, 7) == 0) {
ret = ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_DISABLED);
} else {
ret = -EINVAL;
}
unlock:
mutex_unlock(&ar->conf_mutex);
if (ret)
return ret;
return count;
}
static const struct file_operations fops_scan_ctl = {
.read = ath11k_read_file_spec_scan_ctl,
.write = ath11k_write_file_spec_scan_ctl,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath11k_read_file_spectral_count(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
char buf[32];
size_t len;
u16 spectral_count;
mutex_lock(&ar->conf_mutex);
spectral_count = ar->spectral.count;
mutex_unlock(&ar->conf_mutex);
len = sprintf(buf, "%d\n", spectral_count);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t ath11k_write_file_spectral_count(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
unsigned long val;
char buf[32];
ssize_t len;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
if (kstrtoul(buf, 0, &val))
return -EINVAL;
if (val > ATH11K_SPECTRAL_SCAN_COUNT_MAX)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
ar->spectral.count = val;
mutex_unlock(&ar->conf_mutex);
return count;
}
static const struct file_operations fops_scan_count = {
.read = ath11k_read_file_spectral_count,
.write = ath11k_write_file_spectral_count,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath11k_read_file_spectral_bins(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
char buf[32];
unsigned int bins, fft_size;
size_t len;
mutex_lock(&ar->conf_mutex);
fft_size = ar->spectral.fft_size;
bins = 1 << fft_size;
mutex_unlock(&ar->conf_mutex);
len = sprintf(buf, "%d\n", bins);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t ath11k_write_file_spectral_bins(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
unsigned long val;
char buf[32];
ssize_t len;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
if (kstrtoul(buf, 0, &val))
return -EINVAL;
if (val < ATH11K_SPECTRAL_MIN_BINS ||
val > ar->ab->hw_params.spectral.max_fft_bins)
return -EINVAL;
if (!is_power_of_2(val))
return -EINVAL;
mutex_lock(&ar->conf_mutex);
ar->spectral.fft_size = ilog2(val);
mutex_unlock(&ar->conf_mutex);
return count;
}
static const struct file_operations fops_scan_bins = {
.read = ath11k_read_file_spectral_bins,
.write = ath11k_write_file_spectral_bins,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static int ath11k_spectral_pull_summary(struct ath11k *ar,
struct wmi_dma_buf_release_meta_data *meta,
struct spectral_summary_fft_report *summary,
struct ath11k_spectral_summary_report *report)
{
report->timestamp = __le32_to_cpu(summary->timestamp);
report->agc_total_gain = FIELD_GET(SPECTRAL_SUMMARY_INFO0_AGC_TOTAL_GAIN,
__le32_to_cpu(summary->info0));
report->out_of_band_flag = FIELD_GET(SPECTRAL_SUMMARY_INFO0_OB_FLAG,
__le32_to_cpu(summary->info0));
report->grp_idx = FIELD_GET(SPECTRAL_SUMMARY_INFO0_GRP_IDX,
__le32_to_cpu(summary->info0));
report->rf_saturation = FIELD_GET(SPECTRAL_SUMMARY_INFO0_RECENT_RFSAT,
__le32_to_cpu(summary->info0));
report->inb_pwr_db = FIELD_GET(SPECTRAL_SUMMARY_INFO0_INBAND_PWR_DB,
__le32_to_cpu(summary->info0));
report->false_scan = FIELD_GET(SPECTRAL_SUMMARY_INFO0_FALSE_SCAN,
__le32_to_cpu(summary->info0));
report->detector_id = FIELD_GET(SPECTRAL_SUMMARY_INFO0_DETECTOR_ID,
__le32_to_cpu(summary->info0));
report->primary80 = FIELD_GET(SPECTRAL_SUMMARY_INFO0_PRI80,
__le32_to_cpu(summary->info0));
report->peak_idx = FIELD_GET(SPECTRAL_SUMMARY_INFO2_PEAK_SIGNED_IDX,
__le32_to_cpu(summary->info2));
report->peak_mag = FIELD_GET(SPECTRAL_SUMMARY_INFO2_PEAK_MAGNITUDE,
__le32_to_cpu(summary->info2));
report->gain_change = FIELD_GET(SPECTRAL_SUMMARY_INFO2_GAIN_CHANGE,
__le32_to_cpu(summary->info2));
memcpy(&report->meta, meta, sizeof(*meta));
return 0;
}
static int ath11k_spectral_pull_search(struct ath11k *ar,
struct spectral_search_fft_report *search,
struct ath11k_spectral_search_report *report)
{
report->timestamp = __le32_to_cpu(search->timestamp);
report->detector_id = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_DETECTOR_ID,
__le32_to_cpu(search->info0));
report->fft_count = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_FFT_NUM,
__le32_to_cpu(search->info0));
report->radar_check = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_RADAR_CHECK,
__le32_to_cpu(search->info0));
report->peak_idx = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_PEAK_SIGNED_IDX,
__le32_to_cpu(search->info0));
report->chain_idx = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_CHAIN_IDX,
__le32_to_cpu(search->info0));
report->base_pwr_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO1_BASE_PWR_DB,
__le32_to_cpu(search->info1));
report->total_gain_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO1_TOTAL_GAIN_DB,
__le32_to_cpu(search->info1));
report->strong_bin_count = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_NUM_STRONG_BINS,
__le32_to_cpu(search->info2));
report->peak_mag = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_PEAK_MAGNITUDE,
__le32_to_cpu(search->info2));
report->avg_pwr_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_AVG_PWR_DB,
__le32_to_cpu(search->info2));
report->rel_pwr_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_REL_PWR_DB,
__le32_to_cpu(search->info2));
return 0;
}
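/* Recover the exponent by which the peak magnitude was scaled down
 * before being stored in the 8-bit bin array: find the smallest
 * max_exp such that bins[dc_pos + max_index] == max_magnitude >> max_exp.
 * For example, a peak magnitude of 640 stored as bin value 80 yields
 * max_exp = 3, since 640 >> 3 == 80.
 */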
static u8 ath11k_spectral_get_max_exp(s8 max_index, u8 max_magnitude,
int bin_len, u8 *bins)
{
int dc_pos;
u8 max_exp;
dc_pos = bin_len / 2;
/* peak index outside of bins */
if (dc_pos <= max_index || -dc_pos >= max_index)
return 0;
for (max_exp = 0; max_exp < 8; max_exp++) {
if (bins[dc_pos + max_index] == (max_magnitude >> max_exp))
break;
}
/* max_exp not found */
if (bins[dc_pos + max_index] != (max_magnitude >> max_exp))
return 0;
return max_exp;
}
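/* Copy one byte per FFT bin out of the hardware report, skipping the
 * padding inserted by the hardware: input bytes are laid out fft_sz
 * apart while output bytes are contiguous.
 */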
static void ath11k_spectral_parse_fft(u8 *outbins, u8 *inbins, int num_bins, u8 fft_sz)
{
int i, j;
i = 0;
j = 0;
while (i < num_bins) {
outbins[i] = inbins[j];
i++;
j += fft_sz;
}
}
static
int ath11k_spectral_process_fft(struct ath11k *ar,
struct ath11k_spectral_summary_report *summary,
void *data,
struct fft_sample_ath11k *fft_sample,
u32 data_len)
{
struct ath11k_base *ab = ar->ab;
struct spectral_search_fft_report *fft_report = data;
struct ath11k_spectral_search_report search;
struct spectral_tlv *tlv;
int tlv_len, bin_len, num_bins;
u16 length, freq;
u8 chan_width_mhz, bin_sz;
int ret;
u32 check_length;
bool fragment_sample = false;
lockdep_assert_held(&ar->spectral.lock);
if (!ab->hw_params.spectral.fft_sz) {
ath11k_warn(ab, "invalid bin size type for hw rev %d\n",
ab->hw_rev);
return -EINVAL;
}
tlv = (struct spectral_tlv *)data;
tlv_len = FIELD_GET(SPECTRAL_TLV_HDR_LEN, __le32_to_cpu(tlv->header));
/* convert DWORDs into bytes */
tlv_len *= ATH11K_SPECTRAL_DWORD_SIZE;
bin_len = tlv_len - ab->hw_params.spectral.fft_hdr_len;
if (data_len < (bin_len + sizeof(*fft_report))) {
ath11k_warn(ab, "mismatch in expected bin len %d and data len %d\n",
bin_len, data_len);
return -EINVAL;
}
bin_sz = ab->hw_params.spectral.fft_sz + ab->hw_params.spectral.fft_pad_sz;
num_bins = bin_len / bin_sz;
/* Only in-band bins are useful for the user to visualize */
num_bins >>= 1;
if (num_bins < ATH11K_SPECTRAL_MIN_IB_BINS ||
num_bins > ATH11K_SPECTRAL_MAX_IB_BINS(ab) ||
!is_power_of_2(num_bins)) {
ath11k_warn(ab, "Invalid num of bins %d\n", num_bins);
return -EINVAL;
}
check_length = sizeof(*fft_report) + (num_bins * ab->hw_params.spectral.fft_sz);
ret = ath11k_dbring_validate_buffer(ar, data, check_length);
if (ret) {
ath11k_warn(ar->ab, "found magic value in fft data, dropping\n");
return ret;
}
ret = ath11k_spectral_pull_search(ar, data, &search);
if (ret) {
ath11k_warn(ab, "failed to pull search report %d\n", ret);
return ret;
}
chan_width_mhz = summary->meta.ch_width;
switch (chan_width_mhz) {
case ATH11K_SPECTRAL_20MHZ:
case ATH11K_SPECTRAL_40MHZ:
case ATH11K_SPECTRAL_80MHZ:
fft_sample->chan_width_mhz = chan_width_mhz;
break;
case ATH11K_SPECTRAL_160MHZ:
if (ab->hw_params.spectral.fragment_160mhz) {
chan_width_mhz /= 2;
fragment_sample = true;
}
fft_sample->chan_width_mhz = chan_width_mhz;
break;
default:
ath11k_warn(ab, "invalid channel width %d\n", chan_width_mhz);
return -EINVAL;
}
length = sizeof(*fft_sample) - sizeof(struct fft_sample_tlv) + num_bins;
fft_sample->tlv.type = ATH_FFT_SAMPLE_ATH11K;
fft_sample->tlv.length = __cpu_to_be16(length);
fft_sample->tsf = __cpu_to_be32(search.timestamp);
fft_sample->max_magnitude = __cpu_to_be16(search.peak_mag);
fft_sample->max_index = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_PEAK_SIGNED_IDX,
__le32_to_cpu(fft_report->info0));
summary->inb_pwr_db >>= 1;
fft_sample->rssi = __cpu_to_be16(summary->inb_pwr_db);
fft_sample->noise = __cpu_to_be32(summary->meta.noise_floor[search.chain_idx]);
freq = summary->meta.freq1;
fft_sample->freq1 = __cpu_to_be16(freq);
freq = summary->meta.freq2;
fft_sample->freq2 = __cpu_to_be16(freq);
/* If freq2 is available, the spectral scan results are fragmented
 * into primary and secondary segments
 */
if (fragment_sample && freq) {
if (!ar->spectral.is_primary)
fft_sample->freq1 = cpu_to_be16(freq);
/* We have to toggle is_primary to handle the next report */
ar->spectral.is_primary = !ar->spectral.is_primary;
}
ath11k_spectral_parse_fft(fft_sample->data, fft_report->bins, num_bins,
ab->hw_params.spectral.fft_sz);
fft_sample->max_exp = ath11k_spectral_get_max_exp(fft_sample->max_index,
search.peak_mag,
num_bins,
fft_sample->data);
if (ar->spectral.rfs_scan)
relay_write(ar->spectral.rfs_scan, fft_sample,
length + sizeof(struct fft_sample_tlv));
return 0;
}
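/* Walk the TLV stream of one DMA ring buffer: a scan summary TLV is
 * pulled into summ_rpt first, then the following scan search FFT TLV
 * is combined with it into one fft_sample_ath11k record and written
 * to the relay channel. Parsing stops after the first search FFT
 * report has been processed.
 */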
static int ath11k_spectral_process_data(struct ath11k *ar,
struct ath11k_dbring_data *param)
{
struct ath11k_base *ab = ar->ab;
struct spectral_tlv *tlv;
struct spectral_summary_fft_report *summary = NULL;
struct ath11k_spectral_summary_report summ_rpt;
struct fft_sample_ath11k *fft_sample = NULL;
u8 *data;
u32 data_len, i;
u8 sign, tag;
int tlv_len, sample_sz;
int ret;
bool quit = false;
spin_lock_bh(&ar->spectral.lock);
if (!ar->spectral.enabled) {
ret = -EINVAL;
goto unlock;
}
sample_sz = sizeof(*fft_sample) + ATH11K_SPECTRAL_MAX_IB_BINS(ab);
fft_sample = kmalloc(sample_sz, GFP_ATOMIC);
if (!fft_sample) {
ret = -ENOBUFS;
goto unlock;
}
data = param->data;
data_len = param->data_sz;
i = 0;
while (!quit && (i < data_len)) {
if ((i + sizeof(*tlv)) > data_len) {
ath11k_warn(ab, "failed to parse spectral tlv hdr at bytes %d\n",
i);
ret = -EINVAL;
goto err;
}
tlv = (struct spectral_tlv *)&data[i];
sign = FIELD_GET(SPECTRAL_TLV_HDR_SIGN,
__le32_to_cpu(tlv->header));
if (sign != ATH11K_SPECTRAL_SIGNATURE) {
ath11k_warn(ab, "Invalid sign 0x%x at bytes %d\n",
sign, i);
ret = -EINVAL;
goto err;
}
tlv_len = FIELD_GET(SPECTRAL_TLV_HDR_LEN,
__le32_to_cpu(tlv->header));
/* convert DWORDs into bytes */
tlv_len *= ATH11K_SPECTRAL_DWORD_SIZE;
if ((i + sizeof(*tlv) + tlv_len) > data_len) {
ath11k_warn(ab, "failed to parse spectral tlv payload at bytes %d tlv_len:%d data_len:%d\n",
i, tlv_len, data_len);
ret = -EINVAL;
goto err;
}
tag = FIELD_GET(SPECTRAL_TLV_HDR_TAG,
__le32_to_cpu(tlv->header));
switch (tag) {
case ATH11K_SPECTRAL_TAG_SCAN_SUMMARY:
/* HW bug in the TLV length of the summary report:
 * HW reports a 3 DWORD size but the data payload
 * is 4 DWORDs (16 bytes).
 * Remove this workaround once the HW bug is fixed.
 */
tlv_len = sizeof(*summary) - sizeof(*tlv) +
ab->hw_params.spectral.summary_pad_sz;
if (tlv_len < (sizeof(*summary) - sizeof(*tlv))) {
ath11k_warn(ab, "failed to parse spectral summary at bytes %d tlv_len:%d\n",
i, tlv_len);
ret = -EINVAL;
goto err;
}
ret = ath11k_dbring_validate_buffer(ar, data, tlv_len);
if (ret) {
ath11k_warn(ar->ab, "found magic value in spectral summary, dropping\n");
goto err;
}
summary = (struct spectral_summary_fft_report *)tlv;
ath11k_spectral_pull_summary(ar, &param->meta,
summary, &summ_rpt);
break;
case ATH11K_SPECTRAL_TAG_SCAN_SEARCH:
if (tlv_len < (sizeof(struct spectral_search_fft_report) -
sizeof(*tlv))) {
ath11k_warn(ab, "failed to parse spectral search fft at bytes %d\n",
i);
ret = -EINVAL;
goto err;
}
memset(fft_sample, 0, sample_sz);
ret = ath11k_spectral_process_fft(ar, &summ_rpt, tlv,
fft_sample,
data_len - i);
if (ret) {
ath11k_warn(ab, "failed to process spectral fft at bytes %d\n",
i);
goto err;
}
quit = true;
break;
}
i += sizeof(*tlv) + tlv_len;
}
ret = 0;
err:
kfree(fft_sample);
unlock:
spin_unlock_bh(&ar->spectral.lock);
return ret;
}
static int ath11k_spectral_ring_alloc(struct ath11k *ar,
struct ath11k_dbring_cap *db_cap)
{
struct ath11k_spectral *sp = &ar->spectral;
int ret;
ret = ath11k_dbring_srng_setup(ar, &sp->rx_ring,
0, db_cap->min_elem);
if (ret) {
ath11k_warn(ar->ab, "failed to setup db ring\n");
return ret;
}
ath11k_dbring_set_cfg(ar, &sp->rx_ring,
ATH11K_SPECTRAL_NUM_RESP_PER_EVENT,
ATH11K_SPECTRAL_EVENT_TIMEOUT_MS,
ath11k_spectral_process_data);
ret = ath11k_dbring_buf_setup(ar, &sp->rx_ring, db_cap);
if (ret) {
ath11k_warn(ar->ab, "failed to setup db ring buffer\n");
goto srng_cleanup;
}
ret = ath11k_dbring_wmi_cfg_setup(ar, &sp->rx_ring,
WMI_DIRECT_BUF_SPECTRAL);
if (ret) {
ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
goto buffer_cleanup;
}
return 0;
buffer_cleanup:
ath11k_dbring_buf_cleanup(ar, &sp->rx_ring);
srng_cleanup:
ath11k_dbring_srng_cleanup(ar, &sp->rx_ring);
return ret;
}
static inline void ath11k_spectral_ring_free(struct ath11k *ar)
{
struct ath11k_spectral *sp = &ar->spectral;
ath11k_dbring_srng_cleanup(ar, &sp->rx_ring);
ath11k_dbring_buf_cleanup(ar, &sp->rx_ring);
}
static inline void ath11k_spectral_debug_unregister(struct ath11k *ar)
{
debugfs_remove(ar->spectral.scan_bins);
ar->spectral.scan_bins = NULL;
debugfs_remove(ar->spectral.scan_count);
ar->spectral.scan_count = NULL;
debugfs_remove(ar->spectral.scan_ctl);
ar->spectral.scan_ctl = NULL;
if (ar->spectral.rfs_scan) {
relay_close(ar->spectral.rfs_scan);
ar->spectral.rfs_scan = NULL;
}
}
int ath11k_spectral_vif_stop(struct ath11k_vif *arvif)
{
if (!arvif->spectral_enabled)
return 0;
return ath11k_spectral_scan_config(arvif->ar, ATH11K_SPECTRAL_DISABLED);
}
void ath11k_spectral_reset_buffer(struct ath11k *ar)
{
if (!ar->spectral.enabled)
return;
if (ar->spectral.rfs_scan)
relay_reset(ar->spectral.rfs_scan);
}
void ath11k_spectral_deinit(struct ath11k_base *ab)
{
struct ath11k *ar;
struct ath11k_spectral *sp;
int i;
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
sp = &ar->spectral;
if (!sp->enabled)
continue;
mutex_lock(&ar->conf_mutex);
ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_DISABLED);
mutex_unlock(&ar->conf_mutex);
spin_lock_bh(&sp->lock);
sp->enabled = false;
spin_unlock_bh(&sp->lock);
ath11k_spectral_debug_unregister(ar);
ath11k_spectral_ring_free(ar);
}
}
static inline int ath11k_spectral_debug_register(struct ath11k *ar)
{
int ret;
ar->spectral.rfs_scan = relay_open("spectral_scan",
ar->debug.debugfs_pdev,
ATH11K_SPECTRAL_SUB_BUFF_SIZE(ar->ab),
ATH11K_SPECTRAL_NUM_SUB_BUF,
&rfs_scan_cb, NULL);
if (!ar->spectral.rfs_scan) {
ath11k_warn(ar->ab, "failed to open relay in pdev %d\n",
ar->pdev_idx);
return -EINVAL;
}
ar->spectral.scan_ctl = debugfs_create_file("spectral_scan_ctl",
0600,
ar->debug.debugfs_pdev, ar,
&fops_scan_ctl);
if (!ar->spectral.scan_ctl) {
ath11k_warn(ar->ab, "failed to open debugfs in pdev %d\n",
ar->pdev_idx);
ret = -EINVAL;
goto debug_unregister;
}
ar->spectral.scan_count = debugfs_create_file("spectral_count",
0600,
ar->debug.debugfs_pdev, ar,
&fops_scan_count);
if (!ar->spectral.scan_count) {
ath11k_warn(ar->ab, "failed to open debugfs in pdev %d\n",
ar->pdev_idx);
ret = -EINVAL;
goto debug_unregister;
}
ar->spectral.scan_bins = debugfs_create_file("spectral_bins",
0600,
ar->debug.debugfs_pdev, ar,
&fops_scan_bins);
if (!ar->spectral.scan_bins) {
ath11k_warn(ar->ab, "failed to open debugfs in pdev %d\n",
ar->pdev_idx);
ret = -EINVAL;
goto debug_unregister;
}
return 0;
debug_unregister:
ath11k_spectral_debug_unregister(ar);
return ret;
}
int ath11k_spectral_init(struct ath11k_base *ab)
{
struct ath11k *ar;
struct ath11k_spectral *sp;
struct ath11k_dbring_cap db_cap;
int ret;
int i;
if (!test_bit(WMI_TLV_SERVICE_FREQINFO_IN_METADATA,
ab->wmi_ab.svc_map))
return 0;
if (!ab->hw_params.spectral.fft_sz)
return 0;
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
sp = &ar->spectral;
ret = ath11k_dbring_get_cap(ar->ab, ar->pdev_idx,
WMI_DIRECT_BUF_SPECTRAL,
&db_cap);
if (ret)
continue;
idr_init(&sp->rx_ring.bufs_idr);
spin_lock_init(&sp->rx_ring.idr_lock);
spin_lock_init(&sp->lock);
ret = ath11k_spectral_ring_alloc(ar, &db_cap);
if (ret) {
ath11k_warn(ab, "failed to init spectral ring for pdev %d\n",
i);
goto deinit;
}
spin_lock_bh(&sp->lock);
sp->mode = ATH11K_SPECTRAL_DISABLED;
sp->count = ATH11K_WMI_SPECTRAL_COUNT_DEFAULT;
sp->fft_size = ATH11K_WMI_SPECTRAL_FFT_SIZE_DEFAULT;
sp->enabled = true;
spin_unlock_bh(&sp->lock);
ret = ath11k_spectral_debug_register(ar);
if (ret) {
ath11k_warn(ab, "failed to register spectral for pdev %d\n",
i);
goto deinit;
}
}
return 0;
deinit:
ath11k_spectral_deinit(ab);
return ret;
}
enum ath11k_spectral_mode ath11k_spectral_get_mode(struct ath11k *ar)
{
if (ar->spectral.enabled)
return ar->spectral.mode;
else
return ATH11K_SPECTRAL_DISABLED;
}
struct ath11k_dbring *ath11k_spectral_get_dbring(struct ath11k *ar)
{
if (ar->spectral.enabled)
return &ar->spectral.rx_ring;
else
return NULL;
}
|
linux-master
|
drivers/net/wireless/ath/ath11k/spectral.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "dp_tx.h"
#include "debug.h"
#include "debugfs_sta.h"
#include "hw.h"
#include "peer.h"
#include "mac.h"
static enum hal_tcl_encap_type
ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath11k_base *ab = arvif->ar->ab;
if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
return HAL_TCL_ENCAP_TYPE_RAW;
if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
return HAL_TCL_ENCAP_TYPE_ETHERNET;
return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
}
static void ath11k_dp_tx_encap_nwifi(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
u8 *qos_ctl;
if (!ieee80211_is_data_qos(hdr->frame_control))
return;
qos_ctl = ieee80211_get_qos_ctl(hdr);
memmove(skb->data + IEEE80211_QOS_CTL_LEN,
skb->data, (void *)qos_ctl - (void *)skb->data);
skb_pull(skb, IEEE80211_QOS_CTL_LEN);
hdr = (void *)skb->data;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
}
static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
struct ath11k_skb_cb *cb = ATH11K_SKB_CB(skb);
if (cb->flags & ATH11K_SKB_HW_80211_ENCAP)
return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
else if (!ieee80211_is_data_qos(hdr->frame_control))
return HAL_DESC_REO_NON_QOS_TID;
else
return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
}
enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
{
switch (cipher) {
case WLAN_CIPHER_SUITE_WEP40:
return HAL_ENCRYPT_TYPE_WEP_40;
case WLAN_CIPHER_SUITE_WEP104:
return HAL_ENCRYPT_TYPE_WEP_104;
case WLAN_CIPHER_SUITE_TKIP:
return HAL_ENCRYPT_TYPE_TKIP_MIC;
case WLAN_CIPHER_SUITE_CCMP:
return HAL_ENCRYPT_TYPE_CCMP_128;
case WLAN_CIPHER_SUITE_CCMP_256:
return HAL_ENCRYPT_TYPE_CCMP_256;
case WLAN_CIPHER_SUITE_GCMP:
return HAL_ENCRYPT_TYPE_GCMP_128;
case WLAN_CIPHER_SUITE_GCMP_256:
return HAL_ENCRYPT_TYPE_AES_GCMP_256;
default:
return HAL_ENCRYPT_TYPE_OPEN;
}
}
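/* Data TX entry point. The flow: pick a TCL ring from the skb queue
 * mapping, reserve an idr slot for completion lookup, fill in the
 * hal_tx_info (encap/encrypt type, checksum offload flags, TID),
 * DMA-map the frame and post a descriptor to the TCL ring. When a
 * ring is full (or the idr allocation fails) the next ring is tried,
 * provided the hardware supports the retry.
 */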
int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
struct ath11k_sta *arsta, struct sk_buff *skb)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_dp *dp = &ab->dp;
struct hal_tx_info ti = {0};
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
struct hal_srng *tcl_ring;
struct ieee80211_hdr *hdr = (void *)skb->data;
struct dp_tx_ring *tx_ring;
void *hal_tcl_desc;
u8 pool_id;
u8 hal_ring_id;
int ret;
u32 ring_selector = 0;
u8 ring_map = 0;
bool tcl_ring_retry;
if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
return -ESHUTDOWN;
if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
!ieee80211_is_data(hdr->frame_control)))
return -ENOTSUPP;
pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
ring_selector = ab->hw_params.hw_ops->get_ring_selector(skb);
tcl_ring_sel:
tcl_ring_retry = false;
ti.ring_id = ring_selector % ab->hw_params.max_tx_ring;
ti.rbm_id = ab->hw_params.hal_params->tcl2wbm_rbm_map[ti.ring_id].rbm_id;
ring_map |= BIT(ti.ring_id);
tx_ring = &dp->tx_ring[ti.ring_id];
spin_lock_bh(&tx_ring->tx_idr_lock);
ret = idr_alloc(&tx_ring->txbuf_idr, skb, 0,
DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
spin_unlock_bh(&tx_ring->tx_idr_lock);
if (unlikely(ret < 0)) {
if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1) ||
!ab->hw_params.tcl_ring_retry) {
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
return -ENOSPC;
}
/* Check if the next ring is available */
ring_selector++;
goto tcl_ring_sel;
}
ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, ar->pdev_idx) |
FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb);
if (ieee80211_has_a4(hdr->frame_control) &&
is_multicast_ether_addr(hdr->addr3) && arsta &&
arsta->use_4addr_set) {
ti.meta_data_flags = arsta->tcl_metadata;
ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TO_FW, 1);
} else {
ti.meta_data_flags = arvif->tcl_metadata;
}
if (unlikely(ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW)) {
if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) {
ti.encrypt_type =
ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);
if (ieee80211_has_protected(hdr->frame_control))
skb_put(skb, IEEE80211_CCMP_MIC_LEN);
} else {
ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
}
}
ti.addr_search_flags = arvif->hal_addr_search_flags;
ti.search_type = arvif->search_type;
ti.type = HAL_TCL_DESC_TYPE_BUFFER;
ti.pkt_offset = 0;
ti.lmac_id = ar->lmac_id;
ti.bss_ast_hash = arvif->ast_hash;
ti.bss_ast_idx = arvif->ast_idx;
ti.dscp_tid_tbl_idx = 0;
if (likely(skb->ip_summed == CHECKSUM_PARTIAL &&
ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW)) {
ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN, 1) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN, 1);
}
if (ieee80211_vif_is_mesh(arvif->vif))
ti.enable_mesh = true;
ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1);
ti.tid = ath11k_dp_tx_get_tid(skb);
switch (ti.encap_type) {
case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
ath11k_dp_tx_encap_nwifi(skb);
break;
case HAL_TCL_ENCAP_TYPE_RAW:
if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
ret = -EINVAL;
goto fail_remove_idr;
}
break;
case HAL_TCL_ENCAP_TYPE_ETHERNET:
/* no need to encap */
break;
case HAL_TCL_ENCAP_TYPE_802_3:
default:
/* TODO: Take care of other encap modes as well */
ret = -EINVAL;
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
goto fail_remove_idr;
}
ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(ab->dev, ti.paddr))) {
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
ret = -ENOMEM;
goto fail_remove_idr;
}
ti.data_len = skb->len;
skb_cb->paddr = ti.paddr;
skb_cb->vif = arvif->vif;
skb_cb->ar = ar;
hal_ring_id = tx_ring->tcl_data_ring.ring_id;
tcl_ring = &ab->hal.srng_list[hal_ring_id];
spin_lock_bh(&tcl_ring->lock);
ath11k_hal_srng_access_begin(ab, tcl_ring);
hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring);
if (unlikely(!hal_tcl_desc)) {
/* NOTE: It is highly unlikely we'll run out of tcl_ring
 * descriptors, since descriptors are enqueued directly onto the
 * hw queue.
 */
ath11k_hal_srng_access_end(ab, tcl_ring);
ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
spin_unlock_bh(&tcl_ring->lock);
ret = -ENOMEM;
/* On a failure caused by a full tcl ring, checking another ring
 * for available tcl descriptors now is better than checking this
 * ring up front for every packet tx.
 * Restart ring selection if some rings have not been checked yet.
 */
if (unlikely(ring_map != (BIT(ab->hw_params.max_tx_ring)) - 1) &&
ab->hw_params.tcl_ring_retry && ab->hw_params.max_tx_ring > 1) {
tcl_ring_retry = true;
ring_selector++;
}
goto fail_unmap_dma;
}
ath11k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc +
sizeof(struct hal_tlv_hdr), &ti);
ath11k_hal_srng_access_end(ab, tcl_ring);
ath11k_dp_shadow_start_timer(ab, tcl_ring, &dp->tx_ring_timer[ti.ring_id]);
spin_unlock_bh(&tcl_ring->lock);
ath11k_dbg_dump(ab, ATH11K_DBG_DP_TX, NULL, "dp tx msdu: ",
skb->data, skb->len);
atomic_inc(&ar->dp.num_tx_pending);
return 0;
fail_unmap_dma:
dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
fail_remove_idr:
spin_lock_bh(&tx_ring->tx_idr_lock);
idr_remove(&tx_ring->txbuf_idr,
FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ti.desc_id));
spin_unlock_bh(&tx_ring->tx_idr_lock);
if (tcl_ring_retry)
goto tcl_ring_sel;
return ret;
}
static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id,
int msdu_id,
struct dp_tx_ring *tx_ring)
{
struct ath11k *ar;
struct sk_buff *msdu;
struct ath11k_skb_cb *skb_cb;
spin_lock(&tx_ring->tx_idr_lock);
msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
spin_unlock(&tx_ring->tx_idr_lock);
if (unlikely(!msdu)) {
ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
msdu_id);
return;
}
skb_cb = ATH11K_SKB_CB(msdu);
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
dev_kfree_skb_any(msdu);
ar = ab->pdevs[mac_id].ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
}
static void
ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
struct dp_tx_ring *tx_ring,
struct ath11k_dp_htt_wbm_tx_status *ts)
{
struct ieee80211_tx_status status = { 0 };
struct sk_buff *msdu;
struct ieee80211_tx_info *info;
struct ath11k_skb_cb *skb_cb;
struct ath11k *ar;
struct ath11k_peer *peer;
spin_lock(&tx_ring->tx_idr_lock);
msdu = idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
spin_unlock(&tx_ring->tx_idr_lock);
if (unlikely(!msdu)) {
ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n",
ts->msdu_id);
return;
}
skb_cb = ATH11K_SKB_CB(msdu);
info = IEEE80211_SKB_CB(msdu);
ar = skb_cb->ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
if (!skb_cb->vif) {
ieee80211_free_txskb(ar->hw, msdu);
return;
}
memset(&info->status, 0, sizeof(info->status));
if (ts->acked) {
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
ts->ack_rssi;
info->status.flags |=
IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
} else {
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
}
}
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_id(ab, ts->peer_id);
if (!peer || !peer->sta) {
ath11k_dbg(ab, ATH11K_DBG_DATA,
"dp_tx: failed to find the peer with peer_id %d\n",
ts->peer_id);
spin_unlock_bh(&ab->base_lock);
ieee80211_free_txskb(ar->hw, msdu);
return;
}
spin_unlock_bh(&ab->base_lock);
status.sta = peer->sta;
status.info = info;
status.skb = msdu;
ieee80211_tx_status_ext(ar->hw, &status);
}
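/* Completion routed by the firmware through WBM (buf_rel_source ==
 * HAL_WBM_REL_SRC_MODULE_FW). The HTT status word decides whether the
 * frame is reported to mac80211 as acked/dropped or freed silently
 * (reinject/inspect cases).
 */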
static void
ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
void *desc, u8 mac_id,
u32 msdu_id, struct dp_tx_ring *tx_ring)
{
struct htt_tx_wbm_completion *status_desc;
struct ath11k_dp_htt_wbm_tx_status ts = {0};
enum hal_wbm_htt_tx_comp_status wbm_status;
status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
status_desc->info0);
switch (wbm_status) {
case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
ts.msdu_id = msdu_id;
ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI,
status_desc->info1);
if (FIELD_GET(HTT_TX_WBM_COMP_INFO2_VALID, status_desc->info2))
ts.peer_id = FIELD_GET(HTT_TX_WBM_COMP_INFO2_SW_PEER_ID,
status_desc->info2);
else
ts.peer_id = HTT_INVALID_PEER_ID;
ath11k_dp_tx_htt_tx_complete_buf(ab, tx_ring, &ts);
break;
case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
ath11k_dp_tx_free_txbuf(ab, mac_id, msdu_id, tx_ring);
break;
case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
/* This event is to be handled only when the driver decides to
* use WDS offload functionality.
*/
break;
default:
ath11k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
break;
}
}
static void ath11k_dp_tx_cache_peer_stats(struct ath11k *ar,
struct sk_buff *msdu,
struct hal_tx_status *ts)
{
struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
if (ts->try_cnt > 1) {
peer_stats->retry_pkts += ts->try_cnt - 1;
peer_stats->retry_bytes += (ts->try_cnt - 1) * msdu->len;
if (ts->status != HAL_WBM_TQM_REL_REASON_FRAME_ACKED) {
peer_stats->failed_pkts += 1;
peer_stats->failed_bytes += msdu->len;
}
}
}
void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
enum hal_tx_rate_stats_pkt_type pkt_type;
enum hal_tx_rate_stats_sgi sgi;
enum hal_tx_rate_stats_bw bw;
struct ath11k_peer *peer;
struct ath11k_sta *arsta;
struct ieee80211_sta *sta;
u16 rate, ru_tones;
u8 mcs, rate_idx = 0, ofdma;
int ret;
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_id(ab, ts->peer_id);
if (!peer || !peer->sta) {
ath11k_dbg(ab, ATH11K_DBG_DP_TX,
"failed to find the peer by id %u\n", ts->peer_id);
goto err_out;
}
sta = peer->sta;
arsta = (struct ath11k_sta *)sta->drv_priv;
memset(&arsta->txrate, 0, sizeof(arsta->txrate));
pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE,
ts->rate_stats);
mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS,
ts->rate_stats);
sgi = FIELD_GET(HAL_TX_RATE_STATS_INFO0_SGI,
ts->rate_stats);
bw = FIELD_GET(HAL_TX_RATE_STATS_INFO0_BW, ts->rate_stats);
ru_tones = FIELD_GET(HAL_TX_RATE_STATS_INFO0_TONES_IN_RU, ts->rate_stats);
ofdma = FIELD_GET(HAL_TX_RATE_STATS_INFO0_OFDMA_TX, ts->rate_stats);
/* Prefer the real NSS value from arsta->last_txrate.nss; if it is
 * invalid, fall back to the NSS value negotiated at assoc.
 */
if (arsta->last_txrate.nss)
arsta->txrate.nss = arsta->last_txrate.nss;
else
arsta->txrate.nss = arsta->peer_nss;
if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11A ||
pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11B) {
ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
pkt_type,
&rate_idx,
&rate);
if (ret < 0)
goto err_out;
arsta->txrate.legacy = rate;
} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11N) {
if (mcs > 7) {
ath11k_warn(ab, "Invalid HT mcs index %d\n", mcs);
goto err_out;
}
if (arsta->txrate.nss != 0)
arsta->txrate.mcs = mcs + 8 * (arsta->txrate.nss - 1);
arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
if (sgi)
arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AC) {
if (mcs > 9) {
ath11k_warn(ab, "Invalid VHT mcs index %d\n", mcs);
goto err_out;
}
arsta->txrate.mcs = mcs;
arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
if (sgi)
arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
if (mcs > 11) {
ath11k_warn(ab, "Invalid HE mcs index %d\n", mcs);
goto err_out;
}
arsta->txrate.mcs = mcs;
arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
}
arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
if (ofdma && pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
arsta->txrate.bw = RATE_INFO_BW_HE_RU;
arsta->txrate.he_ru_alloc =
ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
}
if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
err_out:
spin_unlock_bh(&ab->base_lock);
}
static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
struct sk_buff *msdu,
struct hal_tx_status *ts)
{
struct ieee80211_tx_status status = { 0 };
struct ieee80211_rate_status status_rate = { 0 };
struct ath11k_base *ab = ar->ab;
struct ieee80211_tx_info *info;
struct ath11k_skb_cb *skb_cb;
struct ath11k_peer *peer;
struct ath11k_sta *arsta;
struct rate_info rate;
if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
/* Must not happen */
return;
}
skb_cb = ATH11K_SKB_CB(msdu);
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) {
ieee80211_free_txskb(ar->hw, msdu);
return;
}
if (unlikely(!skb_cb->vif)) {
ieee80211_free_txskb(ar->hw, msdu);
return;
}
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
/* skip tx rate update from ieee80211_status */
info->status.rates[0].idx = -1;
if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
ts->ack_rssi;
info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
}
if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
if (unlikely(ath11k_debugfs_is_extd_tx_stats_enabled(ar)) ||
ab->hw_params.single_pdev_only) {
if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) {
if (ar->last_ppdu_id == 0) {
ar->last_ppdu_id = ts->ppdu_id;
} else if (ar->last_ppdu_id == ts->ppdu_id ||
ar->cached_ppdu_id == ar->last_ppdu_id) {
ar->cached_ppdu_id = ar->last_ppdu_id;
ar->cached_stats.is_ampdu = true;
ath11k_dp_tx_update_txcompl(ar, ts);
memset(&ar->cached_stats, 0,
sizeof(struct ath11k_per_peer_tx_stats));
} else {
ar->cached_stats.is_ampdu = false;
ath11k_dp_tx_update_txcompl(ar, ts);
memset(&ar->cached_stats, 0,
sizeof(struct ath11k_per_peer_tx_stats));
}
ar->last_ppdu_id = ts->ppdu_id;
}
ath11k_dp_tx_cache_peer_stats(ar, msdu, ts);
}
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_id(ab, ts->peer_id);
if (!peer || !peer->sta) {
ath11k_dbg(ab, ATH11K_DBG_DATA,
"dp_tx: failed to find the peer with peer_id %d\n",
ts->peer_id);
spin_unlock_bh(&ab->base_lock);
ieee80211_free_txskb(ar->hw, msdu);
return;
}
arsta = (struct ath11k_sta *)peer->sta->drv_priv;
status.sta = peer->sta;
status.skb = msdu;
status.info = info;
rate = arsta->last_txrate;
status_rate.rate_idx = rate;
status_rate.try_count = 1;
status.rates = &status_rate;
status.n_rates = 1;
spin_unlock_bh(&ab->base_lock);
ieee80211_tx_status_ext(ar->hw, &status);
}
static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
struct hal_wbm_release_ring *desc,
struct hal_tx_status *ts)
{
ts->buf_rel_source =
FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0);
if (unlikely(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM))
return;
if (unlikely(ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW))
return;
ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
desc->info0);
ts->ppdu_id = FIELD_GET(HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER,
desc->info1);
ts->try_cnt = FIELD_GET(HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT,
desc->info1);
ts->ack_rssi = FIELD_GET(HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI,
desc->info2);
if (desc->info2 & HAL_WBM_RELEASE_INFO2_FIRST_MSDU)
ts->flags |= HAL_TX_STATUS_FLAGS_FIRST_MSDU;
ts->peer_id = FIELD_GET(HAL_WBM_RELEASE_INFO3_PEER_ID, desc->info3);
ts->tid = FIELD_GET(HAL_WBM_RELEASE_INFO3_TID, desc->info3);
if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_VALID)
ts->rate_stats = desc->rate_stats.info0;
else
ts->rate_stats = 0;
}
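/* TX completion bottom half for one WBM ring. Descriptors are first
 * copied into the tx_status circular buffer under the srng lock and
 * then processed outside of it: firmware-routed completions take the
 * HTT path above, TQM completions are reported through
 * ath11k_dp_tx_complete_msdu().
 */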
void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
{
struct ath11k *ar;
struct ath11k_dp *dp = &ab->dp;
int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
struct sk_buff *msdu;
struct hal_tx_status ts = { 0 };
struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
u32 *desc;
u32 msdu_id;
u8 mac_id;
spin_lock_bh(&status_ring->lock);
ath11k_hal_srng_access_begin(ab, status_ring);
while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
tx_ring->tx_status_tail) &&
(desc = ath11k_hal_srng_dst_get_next_entry(ab, status_ring))) {
memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
desc, sizeof(struct hal_wbm_release_ring));
tx_ring->tx_status_head =
ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head);
}
if (unlikely((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
(ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) ==
tx_ring->tx_status_tail))) {
/* TODO: Process pending tx_status messages when kfifo_is_full() */
ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
}
ath11k_hal_srng_access_end(ab, status_ring);
spin_unlock_bh(&status_ring->lock);
while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
struct hal_wbm_release_ring *tx_status;
u32 desc_id;
tx_ring->tx_status_tail =
ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
ath11k_dp_tx_status_parse(ab, tx_status, &ts);
desc_id = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
tx_status->buf_addr_info.info1);
mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);
if (unlikely(ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)) {
ath11k_dp_tx_process_htt_tx_complete(ab,
(void *)tx_status,
mac_id, msdu_id,
tx_ring);
continue;
}
spin_lock(&tx_ring->tx_idr_lock);
msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
if (unlikely(!msdu)) {
ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
msdu_id);
spin_unlock(&tx_ring->tx_idr_lock);
continue;
}
spin_unlock(&tx_ring->tx_idr_lock);
ar = ab->pdevs[mac_id].ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
ath11k_dp_tx_complete_msdu(ar, msdu, &ts);
}
}
int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
enum hal_reo_cmd_type type,
struct ath11k_hal_reo_cmd *cmd,
void (*cb)(struct ath11k_dp *, void *,
enum hal_reo_cmd_status))
{
struct ath11k_dp *dp = &ab->dp;
struct dp_reo_cmd *dp_cmd;
struct hal_srng *cmd_ring;
int cmd_num;
if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
return -ESHUTDOWN;
cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
/* cmd_num starts from 1; on failure, return the error code */
if (cmd_num < 0)
return cmd_num;
/* reo cmd ring descriptors have cmd_num starting from 1 */
if (cmd_num == 0)
return -EINVAL;
if (!cb)
return 0;
/* Can this be optimized so that we keep the pending command list
 * only for the tid delete command, freeing the resource on the
 * command status indication?
 */
dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
if (!dp_cmd)
return -ENOMEM;
memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid));
dp_cmd->cmd_num = cmd_num;
dp_cmd->handler = cb;
spin_lock_bh(&dp->reo_cmd_lock);
list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
spin_unlock_bh(&dp->reo_cmd_lock);
return 0;
}
static int
ath11k_dp_tx_get_ring_id_type(struct ath11k_base *ab,
int mac_id, u32 ring_id,
enum hal_ring_type ring_type,
enum htt_srng_ring_type *htt_ring_type,
enum htt_srng_ring_id *htt_ring_id)
{
int lmac_ring_id_offset = 0;
int ret = 0;
switch (ring_type) {
case HAL_RXDMA_BUF:
lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC;
/* for QCA6390, the host fills rx buffers to the fw and the fw
 * fills the rxbuf ring for each rxdma
 */
if (!ab->hw_params.rx_mac_buf_ring) {
if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
lmac_ring_id_offset) ||
ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
lmac_ring_id_offset))) {
ret = -EINVAL;
}
*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
} else {
if (ring_id == HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF) {
*htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
*htt_ring_type = HTT_SW_TO_SW_RING;
} else {
*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
}
}
break;
case HAL_RXDMA_DST:
*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
*htt_ring_type = HTT_HW_TO_SW_RING;
break;
case HAL_RXDMA_MONITOR_BUF:
*htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
case HAL_RXDMA_MONITOR_STATUS:
*htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
case HAL_RXDMA_MONITOR_DST:
*htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
*htt_ring_type = HTT_HW_TO_SW_RING;
break;
case HAL_RXDMA_MONITOR_DESC:
*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
default:
ath11k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
ret = -EINVAL;
}
return ret;
}
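/* Build and send an HTT_H2T_MSG_TYPE_SRING_SETUP message describing
 * one host SRNG to the target: ring base/head/tail addresses, the
 * entry size in 32-bit words, MSI parameters and the interrupt
 * batching/timing thresholds.
 */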
int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
int mac_id, enum hal_ring_type ring_type)
{
struct htt_srng_setup_cmd *cmd;
struct hal_srng *srng = &ab->hal.srng_list[ring_id];
struct hal_srng_params params;
struct sk_buff *skb;
u32 ring_entry_sz;
int len = sizeof(*cmd);
dma_addr_t hp_addr, tp_addr;
enum htt_srng_ring_type htt_ring_type;
enum htt_srng_ring_id htt_ring_id;
int ret;
skb = ath11k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
memset(&params, 0, sizeof(params));
ath11k_hal_srng_get_params(ab, srng, &params);
hp_addr = ath11k_hal_srng_get_hp_addr(ab, srng);
tp_addr = ath11k_hal_srng_get_tp_addr(ab, srng);
ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
ring_type, &htt_ring_type,
&htt_ring_id);
if (ret)
goto err_free;
skb_put(skb, len);
cmd = (struct htt_srng_setup_cmd *)skb->data;
cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
HTT_H2T_MSG_TYPE_SRING_SETUP);
if (htt_ring_type == HTT_SW_TO_HW_RING ||
htt_ring_type == HTT_HW_TO_SW_RING)
cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
DP_SW2HW_MACID(mac_id));
else
cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
mac_id);
cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE,
htt_ring_type);
cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, htt_ring_id);
cmd->ring_base_addr_lo = params.ring_base_paddr &
HAL_ADDR_LSB_REG_MASK;
cmd->ring_base_addr_hi = (u64)params.ring_base_paddr >>
HAL_ADDR_MSB_REG_SHIFT;
ret = ath11k_hal_srng_get_entrysize(ab, ring_type);
if (ret < 0)
goto err_free;
ring_entry_sz = ret;
ring_entry_sz >>= 2;
cmd->info1 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE,
ring_entry_sz);
cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE,
params.num_entries * ring_entry_sz);
cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP,
!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
cmd->info1 |= FIELD_PREP(
HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP,
!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
cmd->info1 |= FIELD_PREP(
HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP,
!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP));
if (htt_ring_type == HTT_SW_TO_HW_RING)
cmd->info1 |= HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS;
cmd->ring_head_off32_remote_addr_lo = hp_addr & HAL_ADDR_LSB_REG_MASK;
cmd->ring_head_off32_remote_addr_hi = (u64)hp_addr >>
HAL_ADDR_MSB_REG_SHIFT;
cmd->ring_tail_off32_remote_addr_lo = tp_addr & HAL_ADDR_LSB_REG_MASK;
cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >>
HAL_ADDR_MSB_REG_SHIFT;
cmd->ring_msi_addr_lo = lower_32_bits(params.msi_addr);
cmd->ring_msi_addr_hi = upper_32_bits(params.msi_addr);
cmd->msi_data = params.msi_data;
cmd->intr_info = FIELD_PREP(
HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
params.intr_batch_cntr_thres_entries * ring_entry_sz);
cmd->intr_info |= FIELD_PREP(
HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH,
params.intr_timer_thres_us >> 3);
cmd->info2 = 0;
if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
cmd->info2 = FIELD_PREP(
HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH,
params.low_threshold);
}
ath11k_dbg(ab, ATH11K_DBG_DP_TX,
"htt srng setup msi_addr_lo 0x%x msi_addr_hi 0x%x msi_data 0x%x ring_id %d ring_type %d intr_info 0x%x flags 0x%x\n",
cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
cmd->msi_data, ring_id, ring_type, cmd->intr_info, cmd->info2);
ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
if (ret)
goto err_free;
return 0;
err_free:
dev_kfree_skb_any(skb);
return ret;
}
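/* HTT version handshake: send HTT_H2T_MSG_TYPE_VERSION_REQ and wait for
* the target's version event to complete htt_tgt_version_received within
* the timeout below. A major version mismatch is rejected since it implies
* incompatible HTT message layouts.
*/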
#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab)
{
struct ath11k_dp *dp = &ab->dp;
struct sk_buff *skb;
struct htt_ver_req_cmd *cmd;
int len = sizeof(*cmd);
int ret;
init_completion(&dp->htt_tgt_version_received);
skb = ath11k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_ver_req_cmd *)skb->data;
cmd->ver_reg_info = FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID,
HTT_H2T_MSG_TYPE_VERSION_REQ);
ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
HTT_TARGET_VERSION_TIMEOUT_HZ);
if (ret == 0) {
ath11k_warn(ab, "htt target version request timed out\n");
return -ETIMEDOUT;
}
if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
ath11k_err(ab, "unsupported htt major version %d supported version is %d\n",
dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
return -ENOTSUPP;
}
return 0;
}
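/* Enable PPDU stats reporting: one HTT_H2T_MSG_TYPE_PPDU_STATS_CFG message
* is sent per rxdma of the pdev, with the target pdev selected by a one-hot
* bit and 'mask' choosing which stats TLV types the firmware reports.
*/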
int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_dp *dp = &ab->dp;
struct sk_buff *skb;
struct htt_ppdu_stats_cfg_cmd *cmd;
int len = sizeof(*cmd);
u8 pdev_mask;
int ret;
int i;
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
skb = ath11k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
pdev_mask = 1 << (ar->pdev_idx + i);
cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);
ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
}
return 0;
}
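/* Program the rx ring selection/filter configuration for one ring. The
* tlv_filter argument selects which TLVs the hardware writes (rx_filter)
* and which MGMT/CTRL/DATA frame classes pass the packet type filters
* (pkt_filter_flags0..3).
*/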
int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
int mac_id, enum hal_ring_type ring_type,
int rx_buf_size,
struct htt_rx_ring_tlv_filter *tlv_filter)
{
struct htt_rx_ring_selection_cfg_cmd *cmd;
struct hal_srng *srng = &ab->hal.srng_list[ring_id];
struct hal_srng_params params;
struct sk_buff *skb;
int len = sizeof(*cmd);
enum htt_srng_ring_type htt_ring_type;
enum htt_srng_ring_id htt_ring_id;
int ret;
skb = ath11k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
memset(&params, 0, sizeof(params));
ath11k_hal_srng_get_params(ab, srng, &params);
ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
ring_type, &htt_ring_type,
&htt_ring_id);
if (ret)
goto err_free;
skb_put(skb, len);
cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
cmd->info0 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE,
HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
if (htt_ring_type == HTT_SW_TO_HW_RING ||
htt_ring_type == HTT_HW_TO_SW_RING)
cmd->info0 |=
FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
DP_SW2HW_MACID(mac_id));
else
cmd->info0 |=
FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
mac_id);
cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID,
htt_ring_id);
cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS,
!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS,
!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
cmd->info1 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE,
rx_buf_size);
cmd->pkt_type_en_flags0 = tlv_filter->pkt_filter_flags0;
cmd->pkt_type_en_flags1 = tlv_filter->pkt_filter_flags1;
cmd->pkt_type_en_flags2 = tlv_filter->pkt_filter_flags2;
cmd->pkt_type_en_flags3 = tlv_filter->pkt_filter_flags3;
cmd->rx_filter_tlv = tlv_filter->rx_filter;
ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
if (ret)
goto err_free;
return 0;
err_free:
dev_kfree_skb_any(skb);
return ret;
}
int
ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
struct htt_ext_stats_cfg_params *cfg_params,
u64 cookie)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_dp *dp = &ab->dp;
struct sk_buff *skb;
struct htt_ext_stats_cfg_cmd *cmd;
u32 pdev_id;
int len = sizeof(*cmd);
int ret;
skb = ath11k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
memset(cmd, 0, sizeof(*cmd));
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
if (ab->hw_params.single_pdev_only)
pdev_id = ath11k_mac_get_target_pdev_id(ar);
else
pdev_id = ar->pdev->pdev_id;
cmd->hdr.pdev_mask = 1 << pdev_id;
cmd->hdr.stats_type = type;
cmd->cfg_param0 = cfg_params->cfg0;
cmd->cfg_param1 = cfg_params->cfg1;
cmd->cfg_param2 = cfg_params->cfg2;
cmd->cfg_param3 = cfg_params->cfg3;
cmd->cookie_lsb = lower_32_bits(cookie);
cmd->cookie_msb = upper_32_bits(cookie);
ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
if (ret) {
ath11k_warn(ab, "failed to send htt type stats request: %d",
ret);
dev_kfree_skb_any(skb);
return ret;
}
return 0;
}
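/* (Re)configure the monitor mode rx rings. With reset == false the buffer
* and status rings are opened up to management/control/data TLVs; with
* reset == true the status rings fall back to the default filter. Targets
* without rxdma1 use the per-MAC buf rings instead and arm the monitor
* reap timer to drain them.
*/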
int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_base *ab = ar->ab;
struct htt_rx_ring_tlv_filter tlv_filter = {0};
int ret = 0, ring_id = 0, i;
if (ab->hw_params.full_monitor_mode) {
ret = ath11k_dp_tx_htt_rx_full_mon_setup(ab,
dp->mac_id, !reset);
if (ret < 0) {
ath11k_err(ab, "failed to setup full monitor %d\n", ret);
return ret;
}
}
ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
if (!reset) {
tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
tlv_filter.pkt_filter_flags0 =
HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
tlv_filter.pkt_filter_flags1 =
HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
tlv_filter.pkt_filter_flags2 =
HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
tlv_filter.pkt_filter_flags3 =
HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
HTT_RX_MON_MO_DATA_FILTER_FLASG3;
}
if (ab->hw_params.rxdma1_enable) {
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
HAL_RXDMA_MONITOR_BUF,
DP_RXDMA_REFILL_RING_SIZE,
&tlv_filter);
} else if (!reset) {
/* the rx_mac_buf rings are configured only when enabling monitor mode */
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
ring_id = dp->rx_mac_buf_ring[i].ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
dp->mac_id + i,
HAL_RXDMA_BUF,
1024,
&tlv_filter);
}
}
if (ret)
return ret;
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
if (!reset) {
tlv_filter.rx_filter =
HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
} else {
tlv_filter = ath11k_mac_mon_status_filter_default;
if (ath11k_debugfs_is_extd_rx_stats_enabled(ar))
tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
}
ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
dp->mac_id + i,
HAL_RXDMA_MONITOR_STATUS,
DP_RXDMA_REFILL_RING_SIZE,
&tlv_filter);
}
if (!ar->ab->hw_params.rxdma1_enable)
mod_timer(&ar->ab->mon_reap_timer, jiffies +
msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
return ret;
}
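/* Enable or disable full monitor mode on targets that support it. The
* command pins the release ring to the SW ring and, when enabling, asks
* the firmware to deliver PPDUs with both zero and non-zero MPDU counts.
*/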
int ath11k_dp_tx_htt_rx_full_mon_setup(struct ath11k_base *ab, int mac_id,
bool config)
{
struct htt_rx_full_monitor_mode_cfg_cmd *cmd;
struct sk_buff *skb;
int ret, len = sizeof(*cmd);
skb = ath11k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_rx_full_monitor_mode_cfg_cmd *)skb->data;
memset(cmd, 0, sizeof(*cmd));
cmd->info0 = FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_MSG_TYPE,
HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
cmd->info0 |= FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_PDEV_ID, mac_id);
cmd->cfg = HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ENABLE |
FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING,
HTT_RX_MON_RING_SW);
if (config) {
cmd->cfg |= HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ZERO_MPDUS_END |
HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END;
}
ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
if (ret)
goto err_free;
return 0;
err_free:
dev_kfree_skb_any(skb);
return ret;
}
|
linux-master
|
drivers/net/wireless/ath/ath11k/dp_tx.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
#include "mac.h"
#include <net/mac80211.h>
#include "core.h"
#include "hif.h"
#include "debug.h"
#include "wmi.h"
#include "wow.h"
#include "dp_rx.h"
static const struct wiphy_wowlan_support ath11k_wowlan_support = {
.flags = WIPHY_WOWLAN_DISCONNECT |
WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
WIPHY_WOWLAN_GTK_REKEY_FAILURE,
.pattern_min_len = WOW_MIN_PATTERN_SIZE,
.pattern_max_len = WOW_MAX_PATTERN_SIZE,
.max_pkt_offset = WOW_MAX_PKT_OFFSET,
};
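/* Put the target into WoW. The WMI wow enable command is retried up to
* ATH11K_WOW_RETRY_NUM times: each attempt waits for the HTC suspend
* completion and succeeds only once ATH11K_FLAG_HTC_SUSPEND_COMPLETE is
* observed. Usage sketch from a suspend path (error handling elided):
*
* ret = ath11k_wow_enable(ar->ab);
* if (ret)
* goto cleanup;
*/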
int ath11k_wow_enable(struct ath11k_base *ab)
{
struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
int i, ret;
clear_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
for (i = 0; i < ATH11K_WOW_RETRY_NUM; i++) {
reinit_completion(&ab->htc_suspend);
ret = ath11k_wmi_wow_enable(ar);
if (ret) {
ath11k_warn(ab, "failed to issue wow enable: %d\n", ret);
return ret;
}
ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
if (ret == 0) {
ath11k_warn(ab,
"timed out while waiting for htc suspend completion\n");
return -ETIMEDOUT;
}
if (test_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags))
/* success, suspend complete received */
return 0;
ath11k_warn(ab, "htc suspend not complete, retrying (try %d)\n",
i);
msleep(ATH11K_WOW_RETRY_WAIT_MS);
}
ath11k_warn(ab, "htc suspend not complete, failing after %d tries\n", i);
return -ETIMEDOUT;
}
int ath11k_wow_wakeup(struct ath11k_base *ab)
{
struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
int ret;
/* In the case of WCN6750, WoW wakeup is done
* by sending SMP2P power save exit message
* to the target processor.
*/
if (ab->hw_params.smp2p_wow_exit)
return 0;
reinit_completion(&ab->wow.wakeup_completed);
ret = ath11k_wmi_wow_host_wakeup_ind(ar);
if (ret) {
ath11k_warn(ab, "failed to send wow wakeup indication: %d\n",
ret);
return ret;
}
ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
if (ret == 0) {
ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
return -ETIMEDOUT;
}
return 0;
}
static int ath11k_wow_vif_cleanup(struct ath11k_vif *arvif)
{
struct ath11k *ar = arvif->ar;
int i, ret;
for (i = 0; i < WOW_EVENT_MAX; i++) {
ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
if (ret) {
ath11k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
wow_wakeup_event(i), arvif->vdev_id, ret);
return ret;
}
}
for (i = 0; i < ar->wow.max_num_patterns; i++) {
ret = ath11k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
if (ret) {
ath11k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
i, arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
static int ath11k_wow_cleanup(struct ath11k *ar)
{
struct ath11k_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath11k_wow_vif_cleanup(arvif);
if (ret) {
ath11k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
/* Convert a 802.3 format to a 802.11 format.
* +------------+-----------+--------+----------------+
* 802.3: |dest mac(6B)|src mac(6B)|type(2B)| body... |
* +------------+-----------+--------+----------------+
* |__ |_______ |____________ |________
* | | | |
* +--+------------+----+-----------+---------------+-----------+
* 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... |
* +--+------------+----+-----------+---------------+-----------+
*/
static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
const struct cfg80211_pkt_pattern *old)
{
u8 hdr_8023_pattern[ETH_HLEN] = {};
u8 hdr_8023_bit_mask[ETH_HLEN] = {};
u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};
int total_len = old->pkt_offset + old->pattern_len;
int hdr_80211_end_offset;
struct ieee80211_hdr_3addr *new_hdr_pattern =
(struct ieee80211_hdr_3addr *)hdr_80211_pattern;
struct ieee80211_hdr_3addr *new_hdr_mask =
(struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
int hdr_len = sizeof(*new_hdr_pattern);
struct rfc1042_hdr *new_rfc_pattern =
(struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
struct rfc1042_hdr *new_rfc_mask =
(struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
int rfc_len = sizeof(*new_rfc_pattern);
memcpy(hdr_8023_pattern + old->pkt_offset,
old->pattern, ETH_HLEN - old->pkt_offset);
memcpy(hdr_8023_bit_mask + old->pkt_offset,
old->mask, ETH_HLEN - old->pkt_offset);
/* Copy destination address */
memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);
/* Copy source address */
memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);
/* Copy logic link type */
memcpy(&new_rfc_pattern->snap_type,
&old_hdr_pattern->h_proto,
sizeof(old_hdr_pattern->h_proto));
memcpy(&new_rfc_mask->snap_type,
&old_hdr_mask->h_proto,
sizeof(old_hdr_mask->h_proto));
/* Compute new pkt_offset */
if (old->pkt_offset < ETH_ALEN)
new->pkt_offset = old->pkt_offset +
offsetof(struct ieee80211_hdr_3addr, addr1);
else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
new->pkt_offset = old->pkt_offset +
offsetof(struct ieee80211_hdr_3addr, addr3) -
offsetof(struct ethhdr, h_source);
else
new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;
/* Compute new hdr end offset */
if (total_len > ETH_HLEN)
hdr_80211_end_offset = hdr_len + rfc_len;
else if (total_len > offsetof(struct ethhdr, h_proto))
hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
else if (total_len > ETH_ALEN)
hdr_80211_end_offset = total_len - ETH_ALEN +
offsetof(struct ieee80211_hdr_3addr, addr3);
else
hdr_80211_end_offset = total_len +
offsetof(struct ieee80211_hdr_3addr, addr1);
new->pattern_len = hdr_80211_end_offset - new->pkt_offset;
memcpy((u8 *)new->pattern,
hdr_80211_pattern + new->pkt_offset,
new->pattern_len);
memcpy((u8 *)new->mask,
hdr_80211_bit_mask + new->pkt_offset,
new->pattern_len);
if (total_len > ETH_HLEN) {
/* Copy frame body */
memcpy((u8 *)new->pattern + new->pattern_len,
(void *)old->pattern + ETH_HLEN - old->pkt_offset,
total_len - ETH_HLEN);
memcpy((u8 *)new->mask + new->pattern_len,
(void *)old->mask + ETH_HLEN - old->pkt_offset,
total_len - ETH_HLEN);
new->pattern_len += total_len - ETH_HLEN;
}
}
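/* Translate a cfg80211 scheduled scan (net-detect) request into a WMI PNO
* request: per-match-set SSIDs and channel lists are copied, networks that
* also appear in the explicit SSID list are flagged as hidden, and the scan
* cadence is derived from the one or two scan plans cfg80211 provides.
*/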
static int ath11k_wmi_pno_check_and_convert(struct ath11k *ar, u32 vdev_id,
struct cfg80211_sched_scan_request *nd_config,
struct wmi_pno_scan_req *pno)
{
int i, j;
u8 ssid_len;
pno->enable = 1;
pno->vdev_id = vdev_id;
pno->uc_networks_count = nd_config->n_match_sets;
if (!pno->uc_networks_count ||
pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
return -EINVAL;
if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
return -EINVAL;
/* Filling per profile params */
for (i = 0; i < pno->uc_networks_count; i++) {
ssid_len = nd_config->match_sets[i].ssid.ssid_len;
if (ssid_len == 0 || ssid_len > 32)
return -EINVAL;
pno->a_networks[i].ssid.ssid_len = ssid_len;
memcpy(pno->a_networks[i].ssid.ssid,
nd_config->match_sets[i].ssid.ssid,
nd_config->match_sets[i].ssid.ssid_len);
pno->a_networks[i].authentication = 0;
pno->a_networks[i].encryption = 0;
pno->a_networks[i].bcast_nw_type = 0;
/* Copying list of valid channel into request */
pno->a_networks[i].channel_count = nd_config->n_channels;
pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;
for (j = 0; j < nd_config->n_channels; j++) {
pno->a_networks[i].channels[j] =
nd_config->channels[j]->center_freq;
}
}
/* set scan to passive if no SSIDs are specified in the request */
if (nd_config->n_ssids == 0)
pno->do_passive_scan = true;
else
pno->do_passive_scan = false;
for (i = 0; i < nd_config->n_ssids; i++) {
j = 0;
while (j < pno->uc_networks_count) {
if (pno->a_networks[j].ssid.ssid_len ==
nd_config->ssids[i].ssid_len &&
(memcmp(pno->a_networks[j].ssid.ssid,
nd_config->ssids[i].ssid,
pno->a_networks[j].ssid.ssid_len) == 0)) {
pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
break;
}
j++;
}
}
if (nd_config->n_scan_plans == 2) {
pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
pno->slow_scan_period =
nd_config->scan_plans[1].interval * MSEC_PER_SEC;
} else if (nd_config->n_scan_plans == 1) {
pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
pno->fast_scan_max_cycles = 1;
pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
} else {
ath11k_warn(ar->ab, "Invalid number of scan plans %d !!",
nd_config->n_scan_plans);
}
if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
/* enable mac randomization */
pno->enable_pno_scan_randomization = 1;
memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
}
pno->delay_start_time = nd_config->delay;
/* Current FW does not support min-max range for dwell time */
pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;
return 0;
}
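/* Build the per-vdev WoW configuration: wakeup events by vdev type, then
* the user patterns. cfg80211 supplies a bytemask (one bit per pattern
* byte) while the firmware expects a full bitmask, hence the expansion
* below; e.g. a mask byte of 0x05 covering pattern bytes 0..7 becomes
* bitmask[0] = bitmask[2] = 0xff (illustrative values).
*/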
static int ath11k_vif_wow_set_wakeups(struct ath11k_vif *arvif,
struct cfg80211_wowlan *wowlan)
{
int ret, i;
unsigned long wow_mask = 0;
struct ath11k *ar = arvif->ar;
const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
int pattern_id = 0;
/* Setup requested WOW features */
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_IBSS:
__set_bit(WOW_BEACON_EVENT, &wow_mask);
fallthrough;
case WMI_VDEV_TYPE_AP:
__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
__set_bit(WOW_HTT_EVENT, &wow_mask);
__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
break;
case WMI_VDEV_TYPE_STA:
if (wowlan->disconnect) {
__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
__set_bit(WOW_BMISS_EVENT, &wow_mask);
__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
}
if (wowlan->magic_pkt)
__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
if (wowlan->nd_config) {
struct wmi_pno_scan_req *pno;
int ret;
pno = kzalloc(sizeof(*pno), GFP_KERNEL);
if (!pno)
return -ENOMEM;
ar->nlo_enabled = true;
ret = ath11k_wmi_pno_check_and_convert(ar, arvif->vdev_id,
wowlan->nd_config, pno);
if (!ret) {
ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
__set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
}
kfree(pno);
}
break;
default:
break;
}
for (i = 0; i < wowlan->n_patterns; i++) {
u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
struct cfg80211_pkt_pattern new_pattern = {};
struct cfg80211_pkt_pattern old_pattern = patterns[i];
int j;
new_pattern.pattern = ath_pattern;
new_pattern.mask = ath_bitmask;
if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
continue;
/* convert bytemask to bitmask */
for (j = 0; j < patterns[i].pattern_len; j++)
if (patterns[i].mask[j / 8] & BIT(j % 8))
bitmask[j] = 0xff;
old_pattern.mask = bitmask;
if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
ATH11K_HW_TXRX_NATIVE_WIFI) {
if (patterns[i].pkt_offset < ETH_HLEN) {
u8 pattern_ext[WOW_MAX_PATTERN_SIZE] = {};
memcpy(pattern_ext, old_pattern.pattern,
old_pattern.pattern_len);
old_pattern.pattern = pattern_ext;
ath11k_wow_convert_8023_to_80211(&new_pattern,
&old_pattern);
} else {
new_pattern = old_pattern;
new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
}
}
if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
return -EINVAL;
ret = ath11k_wmi_wow_add_pattern(ar, arvif->vdev_id,
pattern_id,
new_pattern.pattern,
new_pattern.mask,
new_pattern.pattern_len,
new_pattern.pkt_offset);
if (ret) {
ath11k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
pattern_id,
arvif->vdev_id, ret);
return ret;
}
pattern_id++;
__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
}
for (i = 0; i < WOW_EVENT_MAX; i++) {
if (!test_bit(i, &wow_mask))
continue;
ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
if (ret) {
ath11k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
wow_wakeup_event(i), arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
static int ath11k_wow_set_wakeups(struct ath11k *ar,
struct cfg80211_wowlan *wowlan)
{
struct ath11k_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath11k_vif_wow_set_wakeups(arvif, wowlan);
if (ret) {
ath11k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
static int ath11k_vif_wow_clean_nlo(struct ath11k_vif *arvif)
{
int ret = 0;
struct ath11k *ar = arvif->ar;
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_STA:
if (ar->nlo_enabled) {
struct wmi_pno_scan_req *pno;
pno = kzalloc(sizeof(*pno), GFP_KERNEL);
if (!pno)
return -ENOMEM;
pno->enable = 0;
ar->nlo_enabled = false;
ret = ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
kfree(pno);
}
break;
default:
break;
}
return ret;
}
static int ath11k_wow_nlo_cleanup(struct ath11k *ar)
{
struct ath11k_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath11k_vif_wow_clean_nlo(arvif);
if (ret) {
ath11k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
static int ath11k_wow_set_hw_filter(struct ath11k *ar)
{
struct ath11k_vif *arvif;
u32 bitmap;
int ret;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC |
WMI_HW_DATA_FILTER_DROP_NON_ARP_BC;
ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id,
bitmap,
true);
if (ret) {
ath11k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
static int ath11k_wow_clear_hw_filter(struct ath11k *ar)
{
struct ath11k_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id, 0, false);
if (ret) {
ath11k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
static int ath11k_wow_arp_ns_offload(struct ath11k *ar, bool enable)
{
struct ath11k_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
continue;
ret = ath11k_wmi_arp_ns_offload(ar, arvif, enable);
if (ret) {
ath11k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
arvif->vdev_id, enable, ret);
return ret;
}
}
return 0;
}
static int ath11k_gtk_rekey_offload(struct ath11k *ar, bool enable)
{
struct ath11k_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->vdev_type != WMI_VDEV_TYPE_STA ||
!arvif->is_up ||
!arvif->rekey_data.enable_offload)
continue;
/* get rekey info before disable rekey offload */
if (!enable) {
ret = ath11k_wmi_gtk_rekey_getinfo(ar, arvif);
if (ret) {
ath11k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
arvif->vdev_id, ret);
return ret;
}
}
ret = ath11k_wmi_gtk_rekey_offload(ar, arvif, enable);
if (ret) {
ath11k_warn(ar->ab, "failed to offload gtk reky vdev %i: enable %d, ret %d\n",
arvif->vdev_id, enable, ret);
return ret;
}
}
return 0;
}
static int ath11k_wow_protocol_offload(struct ath11k *ar, bool enable)
{
int ret;
ret = ath11k_wow_arp_ns_offload(ar, enable);
if (ret) {
ath11k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
enable, ret);
return ret;
}
ret = ath11k_gtk_rekey_offload(ar, enable);
if (ret) {
ath11k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
enable, ret);
return ret;
}
return 0;
}
static int ath11k_wow_set_keepalive(struct ath11k *ar,
enum wmi_sta_keepalive_method method,
u32 interval)
{
struct ath11k_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath11k_mac_vif_set_keepalive(arvif, method, interval);
if (ret)
return ret;
}
return 0;
}
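/* mac80211 suspend handler. Ordering matters here: tx is drained and
* pktlog stopped before the wakeup set is programmed, protocol offloads
* and hw filters are armed next, and only then is the firmware put into
* WoW; finally the host-side timers/irqs are quiesced and the HIF is
* suspended. Any failure unwinds through the wakeup/cleanup labels.
*/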
int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan)
{
struct ath11k *ar = hw->priv;
int ret;
ret = ath11k_mac_wait_tx_complete(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
return ret;
}
mutex_lock(&ar->conf_mutex);
ret = ath11k_dp_rx_pktlog_stop(ar->ab, true);
if (ret) {
ath11k_warn(ar->ab,
"failed to stop dp rx (and timer) pktlog during wow suspend: %d\n",
ret);
goto exit;
}
ret = ath11k_wow_cleanup(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
ret);
goto exit;
}
ret = ath11k_wow_set_wakeups(ar, wowlan);
if (ret) {
ath11k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
ret);
goto cleanup;
}
ret = ath11k_wow_protocol_offload(ar, true);
if (ret) {
ath11k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
ret);
goto cleanup;
}
ret = ath11k_wow_set_hw_filter(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to set hw filter: %d\n",
ret);
goto cleanup;
}
ret = ath11k_wow_set_keepalive(ar,
WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
if (ret) {
ath11k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
goto cleanup;
}
ret = ath11k_wow_enable(ar->ab);
if (ret) {
ath11k_warn(ar->ab, "failed to start wow: %d\n", ret);
goto cleanup;
}
ret = ath11k_dp_rx_pktlog_stop(ar->ab, false);
if (ret) {
ath11k_warn(ar->ab,
"failed to stop dp rx pktlog during wow suspend: %d\n",
ret);
goto cleanup;
}
ath11k_ce_stop_shadow_timers(ar->ab);
ath11k_dp_stop_shadow_timers(ar->ab);
ath11k_hif_irq_disable(ar->ab);
ath11k_hif_ce_irq_disable(ar->ab);
ret = ath11k_hif_suspend(ar->ab);
if (ret) {
ath11k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
goto wakeup;
}
goto exit;
wakeup:
ath11k_wow_wakeup(ar->ab);
cleanup:
ath11k_wow_cleanup(ar);
exit:
mutex_unlock(&ar->conf_mutex);
return ret ? 1 : 0;
}
void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
struct ath11k *ar = hw->priv;
mutex_lock(&ar->conf_mutex);
device_set_wakeup_enable(ar->ab->dev, enabled);
mutex_unlock(&ar->conf_mutex);
}
int ath11k_wow_op_resume(struct ieee80211_hw *hw)
{
struct ath11k *ar = hw->priv;
int ret;
mutex_lock(&ar->conf_mutex);
ret = ath11k_hif_resume(ar->ab);
if (ret) {
ath11k_warn(ar->ab, "failed to resume hif: %d\n", ret);
goto exit;
}
ath11k_hif_ce_irq_enable(ar->ab);
ath11k_hif_irq_enable(ar->ab);
ret = ath11k_dp_rx_pktlog_start(ar->ab);
if (ret) {
ath11k_warn(ar->ab, "failed to start rx pktlog from wow: %d\n", ret);
goto exit;
}
ret = ath11k_wow_wakeup(ar->ab);
if (ret) {
ath11k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
goto exit;
}
ret = ath11k_wow_nlo_cleanup(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
goto exit;
}
ret = ath11k_wow_clear_hw_filter(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
goto exit;
}
ret = ath11k_wow_protocol_offload(ar, false);
if (ret) {
ath11k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
ret);
goto exit;
}
ret = ath11k_wow_set_keepalive(ar,
WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
if (ret) {
ath11k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
goto exit;
}
exit:
if (ret) {
switch (ar->state) {
case ATH11K_STATE_ON:
ar->state = ATH11K_STATE_RESTARTING;
ret = 1;
break;
case ATH11K_STATE_OFF:
case ATH11K_STATE_RESTARTING:
case ATH11K_STATE_RESTARTED:
case ATH11K_STATE_WEDGED:
case ATH11K_STATE_FTM:
ath11k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
ar->state);
ret = -EIO;
break;
}
}
mutex_unlock(&ar->conf_mutex);
return ret;
}
int ath11k_wow_init(struct ath11k *ar)
{
if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
return 0;
ar->wow.wowlan_support = ath11k_wowlan_support;
if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
ATH11K_HW_TXRX_NATIVE_WIFI) {
ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
}
if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
}
ar->wow.max_num_patterns = ATH11K_WOW_PATTERNS;
ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
device_set_wakeup_capable(ar->ab->dev, true);
return 0;
}
|
linux-master
|
drivers/net/wireless/ath/ath11k/wow.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "pcic.h"
#include "debug.h"
static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
"bhi",
"mhi-er0",
"mhi-er1",
"ce0",
"ce1",
"ce2",
"ce3",
"ce4",
"ce5",
"ce6",
"ce7",
"ce8",
"ce9",
"ce10",
"ce11",
"host2wbm-desc-feed",
"host2reo-re-injection",
"host2reo-command",
"host2rxdma-monitor-ring3",
"host2rxdma-monitor-ring2",
"host2rxdma-monitor-ring1",
"reo2ost-exception",
"wbm2host-rx-release",
"reo2host-status",
"reo2host-destination-ring4",
"reo2host-destination-ring3",
"reo2host-destination-ring2",
"reo2host-destination-ring1",
"rxdma2host-monitor-destination-mac3",
"rxdma2host-monitor-destination-mac2",
"rxdma2host-monitor-destination-mac1",
"ppdu-end-interrupts-mac3",
"ppdu-end-interrupts-mac2",
"ppdu-end-interrupts-mac1",
"rxdma2host-monitor-status-ring-mac3",
"rxdma2host-monitor-status-ring-mac2",
"rxdma2host-monitor-status-ring-mac1",
"host2rxdma-host-buf-ring-mac3",
"host2rxdma-host-buf-ring-mac2",
"host2rxdma-host-buf-ring-mac1",
"rxdma2host-destination-ring-mac3",
"rxdma2host-destination-ring-mac2",
"rxdma2host-destination-ring-mac1",
"host2tcl-input-ring4",
"host2tcl-input-ring3",
"host2tcl-input-ring2",
"host2tcl-input-ring1",
"wbm2host-tx-completions-ring3",
"wbm2host-tx-completions-ring2",
"wbm2host-tx-completions-ring1",
"tcl2host-status-ring",
};
static const struct ath11k_msi_config ath11k_msi_config[] = {
{
.total_vectors = 32,
.total_users = 4,
.users = (struct ath11k_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
},
.hw_rev = ATH11K_HW_QCA6390_HW20,
},
{
.total_vectors = 16,
.total_users = 3,
.users = (struct ath11k_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
},
.hw_rev = ATH11K_HW_QCN9074_HW10,
},
{
.total_vectors = 32,
.total_users = 4,
.users = (struct ath11k_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
},
.hw_rev = ATH11K_HW_WCN6855_HW20,
},
{
.total_vectors = 32,
.total_users = 4,
.users = (struct ath11k_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
},
.hw_rev = ATH11K_HW_WCN6855_HW21,
},
{
.total_vectors = 28,
.total_users = 2,
.users = (struct ath11k_msi_user[]) {
{ .name = "CE", .num_vectors = 10, .base_vector = 0 },
{ .name = "DP", .num_vectors = 18, .base_vector = 10 },
},
.hw_rev = ATH11K_HW_WCN6750_HW10,
},
};
int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
{
const struct ath11k_msi_config *msi_config;
int i;
for (i = 0; i < ARRAY_SIZE(ath11k_msi_config); i++) {
msi_config = &ath11k_msi_config[i];
if (msi_config->hw_rev == ab->hw_rev)
break;
}
if (i == ARRAY_SIZE(ath11k_msi_config)) {
ath11k_err(ab, "failed to fetch msi config, unsupported hw version: 0x%x\n",
ab->hw_rev);
return -EINVAL;
}
ab->pci.msi.config = msi_config;
return 0;
}
EXPORT_SYMBOL(ath11k_pcic_init_msi_config);
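/* Register access helpers. Offsets below ATH11K_PCI_WINDOW_START hit the
* BAR directly; anything above goes through the bus-specific window ops.
* The exported wrappers additionally wake the device for offsets in the
* always-off region. Caller sketch (hypothetical offset/bit, error
* handling elided):
*
* u32 val = ath11k_pcic_read32(ab, offset);
* ath11k_pcic_write32(ab, offset, val | some_bit);
*/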
static void __ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
if (offset < ATH11K_PCI_WINDOW_START)
iowrite32(value, ab->mem + offset);
else
ab->pci.ops->window_write32(ab, offset, value);
}
void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
int ret = 0;
bool wakeup_required;
/* For offsets beyond BAR + 4K - 32, the device may need to be
* woken up before the register can be accessed.
*/
wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
if (wakeup_required && ab->pci.ops->wakeup)
ret = ab->pci.ops->wakeup(ab);
__ath11k_pcic_write32(ab, offset, value);
if (wakeup_required && !ret && ab->pci.ops->release)
ab->pci.ops->release(ab);
}
EXPORT_SYMBOL(ath11k_pcic_write32);
static u32 __ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
{
u32 val;
if (offset < ATH11K_PCI_WINDOW_START)
val = ioread32(ab->mem + offset);
else
val = ab->pci.ops->window_read32(ab, offset);
return val;
}
u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
{
int ret = 0;
u32 val;
bool wakeup_required;
/* For offsets beyond BAR + 4K - 32, the device may need to be
* woken up before the register can be accessed.
*/
wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
if (wakeup_required && ab->pci.ops->wakeup)
ret = ab->pci.ops->wakeup(ab);
val = __ath11k_pcic_read32(ab, offset);
if (wakeup_required && !ret && ab->pci.ops->release)
ab->pci.ops->release(ab);
return val;
}
EXPORT_SYMBOL(ath11k_pcic_read32);
int ath11k_pcic_read(struct ath11k_base *ab, void *buf, u32 start, u32 end)
{
int ret = 0;
bool wakeup_required;
u32 *data = buf;
u32 i;
/* For offsets beyond BAR + 4K - 32, the device may need to be
* woken up before the registers can be accessed.
*/
wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
end >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
if (wakeup_required && ab->pci.ops->wakeup) {
ret = ab->pci.ops->wakeup(ab);
if (ret) {
ath11k_warn(ab,
"wakeup failed, data may be invalid: %d",
ret);
/* Even though wakeup() failed, continue processing rather
* than returning because some parts of the data may still
* be valid and useful, e.g. they could give us clues about
* a firmware crash.
* Being misled by invalid data is avoidable because we are
* aware of the wakeup failure.
*/
}
}
for (i = start; i < end + 1; i += 4)
*data++ = __ath11k_pcic_read32(ab, i);
if (wakeup_required && ab->pci.ops->release)
ab->pci.ops->release(ab);
return 0;
}
EXPORT_SYMBOL(ath11k_pcic_read);
void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
u32 *msi_addr_hi)
{
*msi_addr_lo = ab->pci.msi.addr_lo;
*msi_addr_hi = ab->pci.msi.addr_hi;
}
EXPORT_SYMBOL(ath11k_pcic_get_msi_address);
int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
int *num_vectors, u32 *user_base_data,
u32 *base_vector)
{
const struct ath11k_msi_config *msi_config = ab->pci.msi.config;
int idx;
for (idx = 0; idx < msi_config->total_users; idx++) {
if (strcmp(user_name, msi_config->users[idx].name) == 0) {
*num_vectors = msi_config->users[idx].num_vectors;
*base_vector = msi_config->users[idx].base_vector;
*user_base_data = *base_vector + ab->pci.msi.ep_base_data;
ath11k_dbg(ab, ATH11K_DBG_PCI,
"msi assignment %s num_vectors %d user_base_data %u base_vector %u\n",
user_name, *num_vectors, *user_base_data,
*base_vector);
return 0;
}
}
ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
return -EINVAL;
}
EXPORT_SYMBOL(ath11k_pcic_get_user_msi_assignment);
void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx)
{
u32 i, msi_data_idx;
for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
if (ce_id == i)
break;
msi_data_idx++;
}
*msi_idx = msi_data_idx;
}
EXPORT_SYMBOL(ath11k_pcic_get_ce_msi_idx);
static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab)
{
int i, j;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
for (j = 0; j < irq_grp->num_irq; j++)
free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
netif_napi_del(&irq_grp->napi);
}
}
void ath11k_pcic_free_irq(struct ath11k_base *ab)
{
int i, irq_idx;
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
}
ath11k_pcic_free_ext_irq(ab);
}
EXPORT_SYMBOL(ath11k_pcic_free_irq);
static void ath11k_pcic_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
u32 irq_idx;
/* In case of one MSI vector, we handle irq enable/disable in a
* uniform way since we only have one irq
*/
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
return;
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
enable_irq(ab->irq_num[irq_idx]);
}
static void ath11k_pcic_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
u32 irq_idx;
/* In case of one MSI vector, we handle irq enable/disable in a
* uniform way since we only have one irq
*/
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
return;
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
disable_irq_nosync(ab->irq_num[irq_idx]);
}
static void ath11k_pcic_ce_irqs_disable(struct ath11k_base *ab)
{
int i;
clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
ath11k_pcic_ce_irq_disable(ab, i);
}
}
static void ath11k_pcic_sync_ce_irqs(struct ath11k_base *ab)
{
int i;
int irq_idx;
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
synchronize_irq(ab->irq_num[irq_idx]);
}
}
static void ath11k_pcic_ce_tasklet(struct tasklet_struct *t)
{
struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
enable_irq(ce_pipe->ab->irq_num[irq_idx]);
}
static irqreturn_t ath11k_pcic_ce_interrupt_handler(int irq, void *arg)
{
struct ath11k_ce_pipe *ce_pipe = arg;
struct ath11k_base *ab = ce_pipe->ab;
int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
return IRQ_HANDLED;
/* last interrupt received for this CE */
ce_pipe->timestamp = jiffies;
disable_irq_nosync(ab->irq_num[irq_idx]);
tasklet_schedule(&ce_pipe->intr_tq);
return IRQ_HANDLED;
}
static void ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
{
struct ath11k_base *ab = irq_grp->ab;
int i;
/* In case of one MSI vector, we handle irq enable/disable
* in a uniform way since we only have one irq
*/
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
return;
for (i = 0; i < irq_grp->num_irq; i++)
disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *sc)
{
int i;
clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &sc->dev_flags);
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
ath11k_pcic_ext_grp_disable(irq_grp);
if (irq_grp->napi_enabled) {
napi_synchronize(&irq_grp->napi);
napi_disable(&irq_grp->napi);
irq_grp->napi_enabled = false;
}
}
}
static void ath11k_pcic_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
{
struct ath11k_base *ab = irq_grp->ab;
int i;
/* In case of one MSI vector, we handle irq enable/disable in a
* uniform way since we only have one irq
*/
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
return;
for (i = 0; i < irq_grp->num_irq; i++)
enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab)
{
int i;
set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
if (!irq_grp->napi_enabled) {
napi_enable(&irq_grp->napi);
irq_grp->napi_enabled = true;
}
ath11k_pcic_ext_grp_enable(irq_grp);
}
}
EXPORT_SYMBOL(ath11k_pcic_ext_irq_enable);
static void ath11k_pcic_sync_ext_irqs(struct ath11k_base *ab)
{
int i, j, irq_idx;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
for (j = 0; j < irq_grp->num_irq; j++) {
irq_idx = irq_grp->irqs[j];
synchronize_irq(ab->irq_num[irq_idx]);
}
}
}
void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
{
__ath11k_pcic_ext_irq_disable(ab);
ath11k_pcic_sync_ext_irqs(ab);
}
EXPORT_SYMBOL(ath11k_pcic_ext_irq_disable);
static int ath11k_pcic_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
struct ath11k_ext_irq_grp,
napi);
struct ath11k_base *ab = irq_grp->ab;
int work_done;
int i;
work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
for (i = 0; i < irq_grp->num_irq; i++)
enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
if (work_done > budget)
work_done = budget;
return work_done;
}
static irqreturn_t ath11k_pcic_ext_interrupt_handler(int irq, void *arg)
{
struct ath11k_ext_irq_grp *irq_grp = arg;
struct ath11k_base *ab = irq_grp->ab;
int i;
if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
return IRQ_HANDLED;
ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq %d\n", irq);
/* last interrupt received for this group */
irq_grp->timestamp = jiffies;
for (i = 0; i < irq_grp->num_irq; i++)
disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
napi_schedule(&irq_grp->napi);
return IRQ_HANDLED;
}
static int
ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
{
return ab->pci.ops->get_msi_irq(ab, vector);
}
static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
{
int i, j, ret, num_vectors = 0;
u32 user_base_data = 0, base_vector = 0;
unsigned long irq_flags;
ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors,
&user_base_data,
&base_vector);
if (ret < 0)
return ret;
irq_flags = IRQF_SHARED;
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
irq_flags |= IRQF_NOBALANCING;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
u32 num_irq = 0;
irq_grp->ab = ab;
irq_grp->grp_id = i;
init_dummy_netdev(&irq_grp->napi_ndev);
netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
ath11k_pcic_ext_grp_napi_poll);
if (ab->hw_params.ring_mask->tx[i] ||
ab->hw_params.ring_mask->rx[i] ||
ab->hw_params.ring_mask->rx_err[i] ||
ab->hw_params.ring_mask->rx_wbm_rel[i] ||
ab->hw_params.ring_mask->reo_status[i] ||
ab->hw_params.ring_mask->rxdma2host[i] ||
ab->hw_params.ring_mask->host2rxdma[i] ||
ab->hw_params.ring_mask->rx_mon_status[i]) {
num_irq = 1;
}
irq_grp->num_irq = num_irq;
irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;
for (j = 0; j < irq_grp->num_irq; j++) {
int irq_idx = irq_grp->irqs[j];
int vector = (i % num_vectors) + base_vector;
int irq = ath11k_pcic_get_msi_irq(ab, vector);
if (irq < 0)
return irq;
ab->irq_num[irq_idx] = irq;
ath11k_dbg(ab, ATH11K_DBG_PCI,
"irq %d group %d\n", irq, i);
irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
ret = request_irq(irq, ath11k_pcic_ext_interrupt_handler,
irq_flags, "DP_EXT_IRQ", irq_grp);
if (ret) {
ath11k_err(ab, "failed request irq %d: %d\n",
vector, ret);
return ret;
}
}
ath11k_pcic_ext_grp_disable(irq_grp);
}
return 0;
}
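/* Request the CE interrupts. Each interrupt-capable CE pipe gets an MSI
* vector (modulo the number of CE vectors granted), a tasklet for bottom
* half service, and starts with its irq disabled until
* ath11k_pcic_ce_irqs_enable() runs; the DP (ext) group irqs are
* configured afterwards.
*/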
int ath11k_pcic_config_irq(struct ath11k_base *ab)
{
struct ath11k_ce_pipe *ce_pipe;
u32 msi_data_start;
u32 msi_data_count, msi_data_idx;
u32 msi_irq_start;
unsigned int msi_data;
int irq, i, ret, irq_idx;
unsigned long irq_flags;
ret = ath11k_pcic_get_user_msi_assignment(ab, "CE", &msi_data_count,
&msi_data_start, &msi_irq_start);
if (ret)
return ret;
irq_flags = IRQF_SHARED;
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
irq_flags |= IRQF_NOBALANCING;
/* Configure CE irqs */
for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
irq = ath11k_pcic_get_msi_irq(ab, msi_data);
if (irq < 0)
return irq;
ce_pipe = &ab->ce.ce_pipe[i];
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
tasklet_setup(&ce_pipe->intr_tq, ath11k_pcic_ce_tasklet);
ret = request_irq(irq, ath11k_pcic_ce_interrupt_handler,
irq_flags, irq_name[irq_idx], ce_pipe);
if (ret) {
ath11k_err(ab, "failed to request irq %d: %d\n",
irq_idx, ret);
return ret;
}
ab->irq_num[irq_idx] = irq;
msi_data_idx++;
ath11k_pcic_ce_irq_disable(ab, i);
}
ret = ath11k_pcic_ext_irq_config(ab);
if (ret)
return ret;
return 0;
}
EXPORT_SYMBOL(ath11k_pcic_config_irq);
void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab)
{
int i;
set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
ath11k_pcic_ce_irq_enable(ab, i);
}
}
EXPORT_SYMBOL(ath11k_pcic_ce_irqs_enable);
static void ath11k_pcic_kill_tasklets(struct ath11k_base *ab)
{
int i;
for (i = 0; i < ab->hw_params.ce_count; i++) {
struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
tasklet_kill(&ce_pipe->intr_tq);
}
}
void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab)
{
ath11k_pcic_ce_irqs_disable(ab);
ath11k_pcic_sync_ce_irqs(ab);
ath11k_pcic_kill_tasklets(ab);
}
EXPORT_SYMBOL(ath11k_pcic_ce_irq_disable_sync);
void ath11k_pcic_stop(struct ath11k_base *ab)
{
ath11k_pcic_ce_irq_disable_sync(ab);
ath11k_ce_cleanup_pipes(ab);
}
EXPORT_SYMBOL(ath11k_pcic_stop);
int ath11k_pcic_start(struct ath11k_base *ab)
{
set_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
ath11k_pcic_ce_irqs_enable(ab);
ath11k_ce_rx_post_buf(ab);
return 0;
}
EXPORT_SYMBOL(ath11k_pcic_start);
int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe)
{
const struct service_to_pipe *entry;
bool ul_set = false, dl_set = false;
int i;
for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
entry = &ab->hw_params.svc_to_ce_map[i];
if (__le32_to_cpu(entry->service_id) != service_id)
continue;
switch (__le32_to_cpu(entry->pipedir)) {
case PIPEDIR_NONE:
break;
case PIPEDIR_IN:
WARN_ON(dl_set);
*dl_pipe = __le32_to_cpu(entry->pipenum);
dl_set = true;
break;
case PIPEDIR_OUT:
WARN_ON(ul_set);
*ul_pipe = __le32_to_cpu(entry->pipenum);
ul_set = true;
break;
case PIPEDIR_INOUT:
WARN_ON(dl_set);
WARN_ON(ul_set);
*dl_pipe = __le32_to_cpu(entry->pipenum);
*ul_pipe = __le32_to_cpu(entry->pipenum);
dl_set = true;
ul_set = true;
break;
}
}
if (WARN_ON(!ul_set || !dl_set))
return -ENOENT;
return 0;
}
EXPORT_SYMBOL(ath11k_pcic_map_service_to_pipe);
int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
const struct ath11k_pci_ops *pci_ops)
{
if (!pci_ops)
return 0;
/* Return error if mandatory pci_ops callbacks are missing */
if (!pci_ops->get_msi_irq || !pci_ops->window_write32 ||
!pci_ops->window_read32)
return -EINVAL;
ab->pci.ops = pci_ops;
return 0;
}
EXPORT_SYMBOL(ath11k_pcic_register_pci_ops);
void ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
{
int i;
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
i == ATH11K_PCI_CE_WAKE_IRQ)
continue;
ath11k_pcic_ce_irq_enable(ab, i);
}
}
EXPORT_SYMBOL(ath11k_pci_enable_ce_irqs_except_wake_irq);
void ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
{
int i;
int irq_idx;
struct ath11k_ce_pipe *ce_pipe;
for (i = 0; i < ab->hw_params.ce_count; i++) {
ce_pipe = &ab->ce.ce_pipe[i];
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
i == ATH11K_PCI_CE_WAKE_IRQ)
continue;
disable_irq_nosync(ab->irq_num[irq_idx]);
synchronize_irq(ab->irq_num[irq_idx]);
tasklet_kill(&ce_pipe->intr_tq);
}
}
EXPORT_SYMBOL(ath11k_pci_disable_ce_irqs_except_wake_irq);
|
linux-master
|
drivers/net/wireless/ath/ath11k/pcic.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include "debug.h"
#include "hal.h"
#include "hal_tx.h"
#include "hal_rx.h"
#include "hal_desc.h"
#include "hif.h"
static void ath11k_hal_reo_set_desc_hdr(struct hal_desc_header *hdr,
u8 owner, u8 buffer_type, u32 magic)
{
hdr->info0 = FIELD_PREP(HAL_DESC_HDR_INFO0_OWNER, owner) |
FIELD_PREP(HAL_DESC_HDR_INFO0_BUF_TYPE, buffer_type);
/* Magic pattern in reserved bits for debugging */
hdr->info0 |= FIELD_PREP(HAL_DESC_HDR_INFO0_DBG_RESERVED, magic);
}
static int ath11k_hal_reo_cmd_queue_stats(struct hal_tlv_hdr *tlv,
struct ath11k_hal_reo_cmd *cmd)
{
struct hal_reo_get_queue_stats *desc;
tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_GET_QUEUE_STATS) |
FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
desc = (struct hal_reo_get_queue_stats *)tlv->value;
memset_startat(desc, 0, queue_addr_lo);
desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
desc->queue_addr_lo = cmd->addr_lo;
desc->info0 = FIELD_PREP(HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI,
cmd->addr_hi);
if (cmd->flag & HAL_REO_CMD_FLG_STATS_CLEAR)
desc->info0 |= HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS;
return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
}
static int ath11k_hal_reo_cmd_flush_cache(struct ath11k_hal *hal, struct hal_tlv_hdr *tlv,
struct ath11k_hal_reo_cmd *cmd)
{
struct hal_reo_flush_cache *desc;
u8 avail_slot = ffz(hal->avail_blk_resource);
if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
if (avail_slot >= HAL_MAX_AVAIL_BLK_RES)
return -ENOSPC;
hal->current_blk_index = avail_slot;
}
tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_FLUSH_CACHE) |
FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
desc = (struct hal_reo_flush_cache *)tlv->value;
memset_startat(desc, 0, cache_addr_lo);
desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
desc->cache_addr_lo = cmd->addr_lo;
desc->info0 = FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI,
cmd->addr_hi);
if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS)
desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS;
if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE;
desc->info0 |=
FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX,
avail_slot);
}
if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_NO_INVAL)
desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE;
if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL)
desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL;
return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
}
static int ath11k_hal_reo_cmd_update_rx_queue(struct hal_tlv_hdr *tlv,
struct ath11k_hal_reo_cmd *cmd)
{
struct hal_reo_update_rx_queue *desc;
tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_UPDATE_RX_REO_QUEUE) |
FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
desc = (struct hal_reo_update_rx_queue *)tlv->value;
memset_startat(desc, 0, queue_addr_lo);
desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
desc->queue_addr_lo = cmd->addr_lo;
desc->info0 =
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI,
cmd->addr_hi) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_RX_QUEUE_NUM)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_VLD)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_ALDC)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_DIS_DUP_DETECTION)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_SOFT_REORDER_EN)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_AC)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_BAR)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_RETRY)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_CHECK_2K_MODE)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_OOR_MODE)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_BA_WINDOW_SIZE)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_CHECK)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_EVEN_PN)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_UNEVEN_PN)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_SIZE)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_SVLD)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_SSN)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_SEQ_2K_ERR)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_VALID)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN,
!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN));
desc->info1 =
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER,
cmd->rx_queue_num) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_VLD,
!!(cmd->upd1 & HAL_REO_CMD_UPD1_VLD)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER,
FIELD_GET(HAL_REO_CMD_UPD1_ALDC, cmd->upd1)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION,
!!(cmd->upd1 & HAL_REO_CMD_UPD1_DIS_DUP_DETECTION)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN,
!!(cmd->upd1 & HAL_REO_CMD_UPD1_SOFT_REORDER_EN)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_AC,
FIELD_GET(HAL_REO_CMD_UPD1_AC, cmd->upd1)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_BAR,
!!(cmd->upd1 & HAL_REO_CMD_UPD1_BAR)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE,
!!(cmd->upd1 & HAL_REO_CMD_UPD1_CHECK_2K_MODE)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RETRY,
!!(cmd->upd1 & HAL_REO_CMD_UPD1_RETRY)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE,
!!(cmd->upd1 & HAL_REO_CMD_UPD1_OOR_MODE)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK,
!!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_CHECK)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN,
!!(cmd->upd1 & HAL_REO_CMD_UPD1_EVEN_PN)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN,
!!(cmd->upd1 & HAL_REO_CMD_UPD1_UNEVEN_PN)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE,
!!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG,
!!(cmd->upd1 & HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG));
if (cmd->pn_size == 24)
cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_24;
else if (cmd->pn_size == 48)
cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_48;
else if (cmd->pn_size == 128)
cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_128;
if (cmd->ba_window_size < 1)
cmd->ba_window_size = 1;
if (cmd->ba_window_size == 1)
cmd->ba_window_size++;
desc->info2 =
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE,
cmd->ba_window_size - 1) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE, cmd->pn_size) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SVLD,
!!(cmd->upd2 & HAL_REO_CMD_UPD2_SVLD)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SSN,
FIELD_GET(HAL_REO_CMD_UPD2_SSN, cmd->upd2)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR,
!!(cmd->upd2 & HAL_REO_CMD_UPD2_SEQ_2K_ERR)) |
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR,
!!(cmd->upd2 & HAL_REO_CMD_UPD2_PN_ERR));
return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
}
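/* Post a REO command to the REO command SRNG. Builds the descriptor for
 * the requested command type and returns the command number stamped into
 * the descriptor header (so the caller can match the later status TLV),
 * or a negative errno when no ring entry is available, the command cannot
 * be built, or the type is unsupported.
 */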
int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng,
enum hal_reo_cmd_type type,
struct ath11k_hal_reo_cmd *cmd)
{
struct hal_tlv_hdr *reo_desc;
int ret;
spin_lock_bh(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
reo_desc = (struct hal_tlv_hdr *)ath11k_hal_srng_src_get_next_entry(ab, srng);
if (!reo_desc) {
ret = -ENOBUFS;
goto out;
}
switch (type) {
case HAL_REO_CMD_GET_QUEUE_STATS:
ret = ath11k_hal_reo_cmd_queue_stats(reo_desc, cmd);
break;
case HAL_REO_CMD_FLUSH_CACHE:
ret = ath11k_hal_reo_cmd_flush_cache(&ab->hal, reo_desc, cmd);
break;
case HAL_REO_CMD_UPDATE_RX_QUEUE:
ret = ath11k_hal_reo_cmd_update_rx_queue(reo_desc, cmd);
break;
case HAL_REO_CMD_FLUSH_QUEUE:
case HAL_REO_CMD_UNBLOCK_CACHE:
case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
ath11k_warn(ab, "Unsupported reo command %d\n", type);
ret = -ENOTSUPP;
break;
default:
ath11k_warn(ab, "Unknown reo command %d\n", type);
ret = -EINVAL;
break;
}
ath11k_dp_shadow_start_timer(ab, srng, &ab->dp.reo_cmd_timer);
out:
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return ret;
}
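/* Pack a DMA address split into low/high words, the SW cookie and the
 * return buffer manager ID into a two-word ath11k_buffer_addr.
 */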
void ath11k_hal_rx_buf_addr_info_set(void *desc, dma_addr_t paddr,
u32 cookie, u8 manager)
{
struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc;
u32 paddr_lo, paddr_hi;
paddr_lo = lower_32_bits(paddr);
paddr_hi = upper_32_bits(paddr);
binfo->info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, paddr_lo);
binfo->info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR, paddr_hi) |
FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie) |
FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, manager);
}
void ath11k_hal_rx_buf_addr_info_get(void *desc, dma_addr_t *paddr,
u32 *cookie, u8 *rbm)
{
struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc;
*paddr =
(((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR, binfo->info1)) << 32) |
FIELD_GET(BUFFER_ADDR_INFO0_ADDR, binfo->info0);
*cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, binfo->info1);
*rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, binfo->info1);
}
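/* Collect the SW cookies of the MSDUs in a link descriptor. The walk
 * stops at the first entry with a zero buffer address, which terminates
 * the valid MSDU list, and num_msdus is trimmed accordingly.
 */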
void ath11k_hal_rx_msdu_link_info_get(void *link_desc, u32 *num_msdus,
u32 *msdu_cookies,
enum hal_rx_buf_return_buf_manager *rbm)
{
struct hal_rx_msdu_link *link = (struct hal_rx_msdu_link *)link_desc;
struct hal_rx_msdu_details *msdu;
int i;
*num_msdus = HAL_NUM_RX_MSDUS_PER_LINK_DESC;
msdu = &link->msdu_link[0];
*rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
msdu->buf_addr_info.info1);
for (i = 0; i < *num_msdus; i++) {
msdu = &link->msdu_link[i];
if (!FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
msdu->buf_addr_info.info0)) {
*num_msdus = i;
break;
}
*msdu_cookies = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
msdu->buf_addr_info.info1);
msdu_cookies++;
}
}
int ath11k_hal_desc_reo_parse_err(struct ath11k_base *ab, u32 *rx_desc,
dma_addr_t *paddr, u32 *desc_bank)
{
struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
enum hal_reo_dest_ring_push_reason push_reason;
enum hal_reo_dest_ring_error_code err_code;
push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
desc->info0);
err_code = FIELD_GET(HAL_REO_DEST_RING_INFO0_ERROR_CODE,
desc->info0);
ab->soc_stats.reo_error[err_code]++;
if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED &&
push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
ath11k_warn(ab, "expected error push reason code, received %d\n",
push_reason);
return -EINVAL;
}
if (FIELD_GET(HAL_REO_DEST_RING_INFO0_BUFFER_TYPE, desc->info0) !=
HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC) {
ath11k_warn(ab, "expected buffer type link_desc");
return -EINVAL;
}
ath11k_hal_rx_reo_ent_paddr_get(ab, rx_desc, paddr, desc_bank);
return 0;
}
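/* Parse a WBM release ring descriptor into rel_info. Only MSDU releases
 * originating from RXDMA or REO with the SW3 return buffer manager are
 * accepted; anything else is rejected with -EINVAL.
 */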
int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
struct hal_rx_wbm_rel_info *rel_info)
{
struct hal_wbm_release_ring *wbm_desc = desc;
enum hal_wbm_rel_desc_type type;
enum hal_wbm_rel_src_module rel_src;
enum hal_rx_buf_return_buf_manager ret_buf_mgr;
type = FIELD_GET(HAL_WBM_RELEASE_INFO0_DESC_TYPE,
wbm_desc->info0);
/* We expect only WBM_REL buffer type */
if (type != HAL_WBM_REL_DESC_TYPE_REL_MSDU) {
WARN_ON(1);
return -EINVAL;
}
rel_src = FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
wbm_desc->info0);
if (rel_src != HAL_WBM_REL_SRC_MODULE_RXDMA &&
rel_src != HAL_WBM_REL_SRC_MODULE_REO)
return -EINVAL;
ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
wbm_desc->buf_addr_info.info1);
if (ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
ab->soc_stats.invalid_rbm++;
return -EINVAL;
}
rel_info->cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
wbm_desc->buf_addr_info.info1);
rel_info->err_rel_src = rel_src;
if (rel_src == HAL_WBM_REL_SRC_MODULE_REO) {
rel_info->push_reason =
FIELD_GET(HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON,
wbm_desc->info0);
rel_info->err_code =
FIELD_GET(HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE,
wbm_desc->info0);
} else {
rel_info->push_reason =
FIELD_GET(HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON,
wbm_desc->info0);
rel_info->err_code =
FIELD_GET(HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE,
wbm_desc->info0);
}
rel_info->first_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_FIRST_MSDU,
wbm_desc->info2);
rel_info->last_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_LAST_MSDU,
wbm_desc->info2);
return 0;
}
void ath11k_hal_rx_reo_ent_paddr_get(struct ath11k_base *ab, void *desc,
dma_addr_t *paddr, u32 *desc_bank)
{
struct ath11k_buffer_addr *buff_addr = desc;
*paddr = ((u64)(FIELD_GET(BUFFER_ADDR_INFO1_ADDR, buff_addr->info1)) << 32) |
FIELD_GET(BUFFER_ADDR_INFO0_ADDR, buff_addr->info0);
*desc_bank = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, buff_addr->info1);
}
void ath11k_hal_rx_msdu_link_desc_set(struct ath11k_base *ab, void *desc,
void *link_desc,
enum hal_wbm_rel_bm_act action)
{
struct hal_wbm_release_ring *dst_desc = desc;
struct hal_wbm_release_ring *src_desc = link_desc;
dst_desc->buf_addr_info = src_desc->buf_addr_info;
dst_desc->info0 |= FIELD_PREP(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
HAL_WBM_REL_SRC_MODULE_SW) |
FIELD_PREP(HAL_WBM_RELEASE_INFO0_BM_ACTION, action) |
FIELD_PREP(HAL_WBM_RELEASE_INFO0_DESC_TYPE,
HAL_WBM_REL_DESC_TYPE_MSDU_LINK);
}
void ath11k_hal_reo_status_queue_stats(struct ath11k_base *ab, u32 *reo_desc,
struct hal_reo_status *status)
{
struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
struct hal_reo_get_queue_stats_status *desc =
(struct hal_reo_get_queue_stats_status *)tlv->value;
status->uniform_hdr.cmd_num =
FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
desc->hdr.info0);
status->uniform_hdr.cmd_status =
FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
desc->hdr.info0);
ath11k_dbg(ab, ATH11K_DBG_HAL, "Queue stats status:\n");
ath11k_dbg(ab, ATH11K_DBG_HAL, "header: cmd_num %d status %d\n",
status->uniform_hdr.cmd_num,
status->uniform_hdr.cmd_status);
ath11k_dbg(ab, ATH11K_DBG_HAL, "ssn %ld cur_idx %ld\n",
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN,
desc->info0),
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX,
desc->info0));
ath11k_dbg(ab, ATH11K_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n",
desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]);
ath11k_dbg(ab, ATH11K_DBG_HAL,
"last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
desc->last_rx_enqueue_timestamp,
desc->last_rx_dequeue_timestamp);
ath11k_dbg(ab, ATH11K_DBG_HAL,
"rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2],
desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5],
desc->rx_bitmap[6], desc->rx_bitmap[7]);
ath11k_dbg(ab, ATH11K_DBG_HAL, "count: cur_mpdu %ld cur_msdu %ld\n",
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT,
desc->info1),
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT,
desc->info1));
ath11k_dbg(ab, ATH11K_DBG_HAL, "fwd_timeout %ld fwd_bar %ld dup_count %ld\n",
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT,
desc->info2),
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT,
desc->info2),
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT,
desc->info2));
ath11k_dbg(ab, ATH11K_DBG_HAL, "frames_in_order %ld bar_rcvd %ld\n",
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT,
desc->info3),
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT,
desc->info3));
ath11k_dbg(ab, ATH11K_DBG_HAL, "num_mpdus %d num_msdus %d total_bytes %d\n",
desc->num_mpdu_frames, desc->num_msdu_frames,
desc->total_bytes);
ath11k_dbg(ab, ATH11K_DBG_HAL, "late_rcvd %ld win_jump_2k %ld hole_cnt %ld\n",
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU,
desc->info4),
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_WINDOW_JMP2K,
desc->info4),
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT,
desc->info4));
ath11k_dbg(ab, ATH11K_DBG_HAL, "looping count %ld\n",
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT,
desc->info5));
}
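/* Quick decode of a REO status TLV: store the execution status for the
 * caller and return the command number the status refers to.
 */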
int ath11k_hal_reo_process_status(u8 *reo_desc, u8 *status)
{
struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
struct hal_reo_status_hdr *hdr;
hdr = (struct hal_reo_status_hdr *)tlv->value;
*status = FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, hdr->info0);
return FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, hdr->info0);
}
void ath11k_hal_reo_flush_queue_status(struct ath11k_base *ab, u32 *reo_desc,
struct hal_reo_status *status)
{
struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
struct hal_reo_flush_queue_status *desc =
(struct hal_reo_flush_queue_status *)tlv->value;
status->uniform_hdr.cmd_num =
FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
desc->hdr.info0);
status->uniform_hdr.cmd_status =
FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
desc->hdr.info0);
status->u.flush_queue.err_detected =
FIELD_GET(HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED,
desc->info0);
}
void ath11k_hal_reo_flush_cache_status(struct ath11k_base *ab, u32 *reo_desc,
struct hal_reo_status *status)
{
struct ath11k_hal *hal = &ab->hal;
struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
struct hal_reo_flush_cache_status *desc =
(struct hal_reo_flush_cache_status *)tlv->value;
status->uniform_hdr.cmd_num =
FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
desc->hdr.info0);
status->uniform_hdr.cmd_status =
FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
desc->hdr.info0);
status->u.flush_cache.err_detected =
FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR,
desc->info0);
status->u.flush_cache.err_code =
FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE,
desc->info0);
if (!status->u.flush_cache.err_code)
hal->avail_blk_resource |= BIT(hal->current_blk_index);
status->u.flush_cache.cache_controller_flush_status_hit =
FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT,
desc->info0);
status->u.flush_cache.cache_controller_flush_status_desc_type =
FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE,
desc->info0);
status->u.flush_cache.cache_controller_flush_status_client_id =
FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID,
desc->info0);
status->u.flush_cache.cache_controller_flush_status_err =
FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR,
desc->info0);
status->u.flush_cache.cache_controller_flush_status_cnt =
FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT,
desc->info0);
}
void ath11k_hal_reo_unblk_cache_status(struct ath11k_base *ab, u32 *reo_desc,
struct hal_reo_status *status)
{
struct ath11k_hal *hal = &ab->hal;
struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
struct hal_reo_unblock_cache_status *desc =
(struct hal_reo_unblock_cache_status *)tlv->value;
status->uniform_hdr.cmd_num =
FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
desc->hdr.info0);
status->uniform_hdr.cmd_status =
FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
desc->hdr.info0);
status->u.unblock_cache.err_detected =
FIELD_GET(HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR,
desc->info0);
status->u.unblock_cache.unblock_type =
FIELD_GET(HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE,
desc->info0);
if (!status->u.unblock_cache.err_detected &&
status->u.unblock_cache.unblock_type ==
HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE)
hal->avail_blk_resource &= ~BIT(hal->current_blk_index);
}
void ath11k_hal_reo_flush_timeout_list_status(struct ath11k_base *ab,
u32 *reo_desc,
struct hal_reo_status *status)
{
struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
struct hal_reo_flush_timeout_list_status *desc =
(struct hal_reo_flush_timeout_list_status *)tlv->value;
status->uniform_hdr.cmd_num =
FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
desc->hdr.info0);
status->uniform_hdr.cmd_status =
FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
desc->hdr.info0);
status->u.timeout_list.err_detected =
FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR,
desc->info0);
status->u.timeout_list.list_empty =
FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY,
desc->info0);
status->u.timeout_list.release_desc_cnt =
FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT,
desc->info1);
status->u.timeout_list.fwd_buf_cnt =
FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT,
desc->info1);
}
void ath11k_hal_reo_desc_thresh_reached_status(struct ath11k_base *ab,
u32 *reo_desc,
struct hal_reo_status *status)
{
struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
struct hal_reo_desc_thresh_reached_status *desc =
(struct hal_reo_desc_thresh_reached_status *)tlv->value;
status->uniform_hdr.cmd_num =
FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
desc->hdr.info0);
status->uniform_hdr.cmd_status =
FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
desc->hdr.info0);
status->u.desc_thresh_reached.threshold_idx =
FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX,
desc->info0);
status->u.desc_thresh_reached.link_desc_counter0 =
FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0,
desc->info1);
status->u.desc_thresh_reached.link_desc_counter1 =
FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1,
desc->info2);
status->u.desc_thresh_reached.link_desc_counter2 =
FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2,
desc->info3);
status->u.desc_thresh_reached.link_desc_counter_sum =
FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM,
desc->info4);
}
void ath11k_hal_reo_update_rx_reo_queue_status(struct ath11k_base *ab,
u32 *reo_desc,
struct hal_reo_status *status)
{
struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
struct hal_reo_status_hdr *desc =
(struct hal_reo_status_hdr *)tlv->value;
status->uniform_hdr.cmd_num =
FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
desc->info0);
status->uniform_hdr.cmd_status =
FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
desc->info0);
}
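/* Compute the REO queue descriptor size for a BA window. The base
 * descriptor covers small windows; the 105/210 thresholds appear to
 * match how many MPDU entries fit per extension descriptor, with up to
 * three extensions for the largest windows.
 */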
u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid)
{
u32 num_ext_desc;
if (ba_window_size <= 1) {
if (tid != HAL_DESC_REO_NON_QOS_TID)
num_ext_desc = 1;
else
num_ext_desc = 0;
} else if (ba_window_size <= 105) {
num_ext_desc = 1;
} else if (ba_window_size <= 210) {
num_ext_desc = 2;
} else {
num_ext_desc = 3;
}
return sizeof(struct hal_rx_reo_queue) +
(num_ext_desc * sizeof(struct hal_rx_reo_queue_ext));
}
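/* Initialize a REO RX queue descriptor in place: ownership/type header,
 * queue number and AC derived from the TID, BA window size, PN check
 * setup for WPA ciphers, and, for QoS TIDs, three pre-tagged extension
 * descriptors.
 */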
void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
u32 start_seq, enum hal_pn_type type)
{
struct hal_rx_reo_queue *qdesc = (struct hal_rx_reo_queue *)vaddr;
struct hal_rx_reo_queue_ext *ext_desc;
memset(qdesc, 0, sizeof(*qdesc));
ath11k_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED,
HAL_DESC_REO_QUEUE_DESC,
REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0);
qdesc->rx_queue_num = FIELD_PREP(HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER, tid);
qdesc->info0 =
FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_VLD, 1) |
FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER, 1) |
FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_AC, ath11k_tid_to_ac(tid));
if (ba_window_size < 1)
ba_window_size = 1;
if (ba_window_size == 1 && tid != HAL_DESC_REO_NON_QOS_TID)
ba_window_size++;
if (ba_window_size == 1)
qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_RETRY, 1);
qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE,
ba_window_size - 1);
switch (type) {
case HAL_PN_TYPE_NONE:
case HAL_PN_TYPE_WAPI_EVEN:
case HAL_PN_TYPE_WAPI_UNEVEN:
break;
case HAL_PN_TYPE_WPA:
qdesc->info0 |=
FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_CHECK, 1) |
FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_SIZE,
HAL_RX_REO_QUEUE_PN_SIZE_48);
break;
}
/* TODO: Set Ignore ampdu flags based on BA window size and/or
* AMPDU capabilities
*/
qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG, 1);
qdesc->info1 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SVLD, 0);
if (start_seq <= 0xfff)
qdesc->info1 = FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SSN,
start_seq);
if (tid == HAL_DESC_REO_NON_QOS_TID)
return;
ext_desc = qdesc->ext_desc;
/* TODO: HW queue descriptors are currently allocated for max BA
* window size for all QOS TIDs so that same descriptor can be used
* later when ADDBA request is received. This should be changed to
* allocate HW queue descriptors based on BA window size being
* negotiated (0 for non BA cases), and reallocate when BA window
* size changes and also send WMI message to FW to change the REO
* queue descriptor in Rx peer entry as part of dp_rx_tid_update.
*/
memset(ext_desc, 0, sizeof(*ext_desc));
ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
HAL_DESC_REO_QUEUE_EXT_DESC,
REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1);
ext_desc++;
memset(ext_desc, 0, sizeof(*ext_desc));
ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
HAL_DESC_REO_QUEUE_EXT_DESC,
REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2);
ext_desc++;
memset(ext_desc, 0, sizeof(*ext_desc));
ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
HAL_DESC_REO_QUEUE_EXT_DESC,
REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3);
}
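/* Pre-stamp every REO command ring entry with an increasing command
 * number so that status events carry a usable number even for commands
 * posted without an explicit one.
 */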
void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
struct hal_srng *srng)
{
struct hal_srng_params params;
struct hal_tlv_hdr *tlv;
struct hal_reo_get_queue_stats *desc;
int i, cmd_num = 1;
int entry_size;
u8 *entry;
memset(&params, 0, sizeof(params));
entry_size = ath11k_hal_srng_get_entrysize(ab, HAL_REO_CMD);
ath11k_hal_srng_get_params(ab, srng, &params);
entry = (u8 *)params.ring_base_vaddr;
for (i = 0; i < params.num_entries; i++) {
tlv = (struct hal_tlv_hdr *)entry;
desc = (struct hal_reo_get_queue_stats *)tlv->value;
desc->cmd.info0 =
FIELD_PREP(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, cmd_num++);
entry += entry_size;
}
}
#define HAL_MAX_UL_MU_USERS 37
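/* The helpers below copy per-user fields from the PPDU end-user stats
 * TLV into the hal_rx_user_status that the monitor RX path keeps per
 * MU user.
 */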
static inline void
ath11k_hal_rx_handle_ofdma_info(void *rx_tlv,
struct hal_rx_user_status *rx_user_status)
{
struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
(struct hal_rx_ppdu_end_user_stats *)rx_tlv;
rx_user_status->ul_ofdma_user_v0_word0 = __le32_to_cpu(ppdu_end_user->info6);
rx_user_status->ul_ofdma_user_v0_word1 = __le32_to_cpu(ppdu_end_user->rsvd2[10]);
}
static inline void
ath11k_hal_rx_populate_byte_count(void *rx_tlv, void *ppduinfo,
struct hal_rx_user_status *rx_user_status)
{
struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
(struct hal_rx_ppdu_end_user_stats *)rx_tlv;
rx_user_status->mpdu_ok_byte_count =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_RSVD2_6_MPDU_OK_BYTE_COUNT,
__le32_to_cpu(ppdu_end_user->rsvd2[6]));
rx_user_status->mpdu_err_byte_count =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_RSVD2_8_MPDU_ERR_BYTE_COUNT,
__le32_to_cpu(ppdu_end_user->rsvd2[8]));
}
static inline void
ath11k_hal_rx_populate_mu_user_info(void *rx_tlv, struct hal_rx_mon_ppdu_info *ppdu_info,
struct hal_rx_user_status *rx_user_status)
{
rx_user_status->ast_index = ppdu_info->ast_index;
rx_user_status->tid = ppdu_info->tid;
rx_user_status->tcp_msdu_count =
ppdu_info->tcp_msdu_count;
rx_user_status->udp_msdu_count =
ppdu_info->udp_msdu_count;
rx_user_status->other_msdu_count =
ppdu_info->other_msdu_count;
rx_user_status->frame_control = ppdu_info->frame_control;
rx_user_status->frame_control_info_valid =
ppdu_info->frame_control_info_valid;
rx_user_status->data_sequence_control_info_valid =
ppdu_info->data_sequence_control_info_valid;
rx_user_status->first_data_seq_ctrl =
ppdu_info->first_data_seq_ctrl;
rx_user_status->preamble_type = ppdu_info->preamble_type;
rx_user_status->ht_flags = ppdu_info->ht_flags;
rx_user_status->vht_flags = ppdu_info->vht_flags;
rx_user_status->he_flags = ppdu_info->he_flags;
rx_user_status->rs_flags = ppdu_info->rs_flags;
rx_user_status->mpdu_cnt_fcs_ok =
ppdu_info->num_mpdu_fcs_ok;
rx_user_status->mpdu_cnt_fcs_err =
ppdu_info->num_mpdu_fcs_err;
ath11k_hal_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status);
}
static u16 ath11k_hal_rx_mpduinfo_get_peerid(struct ath11k_base *ab,
struct hal_rx_mpdu_info *mpdu_info)
{
return ab->hw_params.hw_ops->mpdu_info_get_peerid(mpdu_info);
}
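/* Decode a single monitor status TLV into ppdu_info. Returns BUF_DONE
 * for a dummy TLV, PPDU_DONE once the PPDU end status is seen, and
 * PPDU_NOT_DONE otherwise so the caller keeps walking the buffer.
 */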
static enum hal_rx_mon_status
ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
struct hal_rx_mon_ppdu_info *ppdu_info,
u32 tlv_tag, u8 *tlv_data, u32 userid)
{
u32 info0, info1, value;
u8 he_dcm = 0, he_stbc = 0;
u16 he_gi = 0, he_ltf = 0;
switch (tlv_tag) {
case HAL_RX_PPDU_START: {
struct hal_rx_ppdu_start *ppdu_start =
(struct hal_rx_ppdu_start *)tlv_data;
ppdu_info->ppdu_id =
FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
__le32_to_cpu(ppdu_start->info0));
ppdu_info->chan_num = __le32_to_cpu(ppdu_start->chan_num);
ppdu_info->ppdu_ts = __le32_to_cpu(ppdu_start->ppdu_start_ts);
break;
}
case HAL_RX_PPDU_END_USER_STATS: {
struct hal_rx_ppdu_end_user_stats *eu_stats =
(struct hal_rx_ppdu_end_user_stats *)tlv_data;
info0 = __le32_to_cpu(eu_stats->info0);
info1 = __le32_to_cpu(eu_stats->info1);
ppdu_info->ast_index =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX,
__le32_to_cpu(eu_stats->info2));
ppdu_info->tid =
ffs(FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP,
__le32_to_cpu(eu_stats->info6))) - 1;
ppdu_info->tcp_msdu_count =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT,
__le32_to_cpu(eu_stats->info4));
ppdu_info->udp_msdu_count =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT,
__le32_to_cpu(eu_stats->info4));
ppdu_info->other_msdu_count =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT,
__le32_to_cpu(eu_stats->info5));
ppdu_info->tcp_ack_msdu_count =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT,
__le32_to_cpu(eu_stats->info5));
ppdu_info->preamble_type =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE, info1);
ppdu_info->num_mpdu_fcs_ok =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK,
info1);
ppdu_info->num_mpdu_fcs_err =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR,
info0);
switch (ppdu_info->preamble_type) {
case HAL_RX_PREAMBLE_11N:
ppdu_info->ht_flags = 1;
break;
case HAL_RX_PREAMBLE_11AC:
ppdu_info->vht_flags = 1;
break;
case HAL_RX_PREAMBLE_11AX:
ppdu_info->he_flags = 1;
break;
default:
break;
}
if (userid < HAL_MAX_UL_MU_USERS) {
struct hal_rx_user_status *rxuser_stats =
&ppdu_info->userstats;
ath11k_hal_rx_handle_ofdma_info(tlv_data, rxuser_stats);
ath11k_hal_rx_populate_mu_user_info(tlv_data, ppdu_info,
rxuser_stats);
}
ppdu_info->userstats.mpdu_fcs_ok_bitmap[0] =
__le32_to_cpu(eu_stats->rsvd1[0]);
ppdu_info->userstats.mpdu_fcs_ok_bitmap[1] =
__le32_to_cpu(eu_stats->rsvd1[1]);
break;
}
case HAL_RX_PPDU_END_USER_STATS_EXT: {
struct hal_rx_ppdu_end_user_stats_ext *eu_stats =
(struct hal_rx_ppdu_end_user_stats_ext *)tlv_data;
ppdu_info->userstats.mpdu_fcs_ok_bitmap[2] = eu_stats->info1;
ppdu_info->userstats.mpdu_fcs_ok_bitmap[3] = eu_stats->info2;
ppdu_info->userstats.mpdu_fcs_ok_bitmap[4] = eu_stats->info3;
ppdu_info->userstats.mpdu_fcs_ok_bitmap[5] = eu_stats->info4;
ppdu_info->userstats.mpdu_fcs_ok_bitmap[6] = eu_stats->info5;
ppdu_info->userstats.mpdu_fcs_ok_bitmap[7] = eu_stats->info6;
break;
}
case HAL_PHYRX_HT_SIG: {
struct hal_rx_ht_sig_info *ht_sig =
(struct hal_rx_ht_sig_info *)tlv_data;
info0 = __le32_to_cpu(ht_sig->info0);
info1 = __le32_to_cpu(ht_sig->info1);
ppdu_info->mcs = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO0_MCS, info0);
ppdu_info->bw = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO0_BW, info0);
ppdu_info->is_stbc = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO1_STBC,
info1);
ppdu_info->ldpc = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING, info1);
ppdu_info->gi = info1 & HAL_RX_HT_SIG_INFO_INFO1_GI;
switch (ppdu_info->mcs) {
case 0 ... 7:
ppdu_info->nss = 1;
break;
case 8 ... 15:
ppdu_info->nss = 2;
break;
case 16 ... 23:
ppdu_info->nss = 3;
break;
case 24 ... 31:
ppdu_info->nss = 4;
break;
}
if (ppdu_info->nss > 1)
ppdu_info->mcs = ppdu_info->mcs % 8;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
break;
}
case HAL_PHYRX_L_SIG_B: {
struct hal_rx_lsig_b_info *lsigb =
(struct hal_rx_lsig_b_info *)tlv_data;
ppdu_info->rate = FIELD_GET(HAL_RX_LSIG_B_INFO_INFO0_RATE,
__le32_to_cpu(lsigb->info0));
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
break;
}
case HAL_PHYRX_L_SIG_A: {
struct hal_rx_lsig_a_info *lsiga =
(struct hal_rx_lsig_a_info *)tlv_data;
ppdu_info->rate = FIELD_GET(HAL_RX_LSIG_A_INFO_INFO0_RATE,
__le32_to_cpu(lsiga->info0));
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
break;
}
case HAL_PHYRX_VHT_SIG_A: {
struct hal_rx_vht_sig_a_info *vht_sig =
(struct hal_rx_vht_sig_a_info *)tlv_data;
u32 nsts;
u32 group_id;
u8 gi_setting;
info0 = __le32_to_cpu(vht_sig->info0);
info1 = __le32_to_cpu(vht_sig->info1);
ppdu_info->ldpc = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING,
info1);
ppdu_info->mcs = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_MCS,
info1);
gi_setting = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING,
info1);
switch (gi_setting) {
case HAL_RX_VHT_SIG_A_NORMAL_GI:
ppdu_info->gi = HAL_RX_GI_0_8_US;
break;
case HAL_RX_VHT_SIG_A_SHORT_GI:
case HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY:
ppdu_info->gi = HAL_RX_GI_0_4_US;
break;
}
ppdu_info->is_stbc = info0 & HAL_RX_VHT_SIG_A_INFO_INFO0_STBC;
nsts = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS, info0);
if (ppdu_info->is_stbc && nsts > 0)
nsts = ((nsts + 1) >> 1) - 1;
ppdu_info->nss = (nsts & VHT_SIG_SU_NSS_MASK) + 1;
ppdu_info->bw = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_BW,
info0);
ppdu_info->beamformed = info1 &
HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED;
group_id = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID,
info0);
if (group_id == 0 || group_id == 63)
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
else
ppdu_info->reception_type =
HAL_RX_RECEPTION_TYPE_MU_MIMO;
ppdu_info->vht_flag_values5 = group_id;
ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) |
ppdu_info->nss);
ppdu_info->vht_flag_values2 = ppdu_info->bw;
ppdu_info->vht_flag_values4 =
FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING, info1);
break;
}
case HAL_PHYRX_HE_SIG_A_SU: {
struct hal_rx_he_sig_a_su_info *he_sig_a =
(struct hal_rx_he_sig_a_su_info *)tlv_data;
ppdu_info->he_flags = 1;
info0 = __le32_to_cpu(he_sig_a->info0);
info1 = __le32_to_cpu(he_sig_a->info1);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND, info0);
if (value == 0)
ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG;
else
ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU;
ppdu_info->he_data1 |=
IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN;
ppdu_info->he_data2 |=
IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN;
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR, info0);
ppdu_info->he_data3 =
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE, info0);
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG, info0);
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_UL_DL, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS, info0);
ppdu_info->mcs = value;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, value);
he_dcm = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM, info0);
ppdu_info->dcm = he_dcm;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM, he_dcm);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING, info1);
ppdu_info->ldpc = (value == HAL_RX_SU_MU_CODING_LDPC) ? 1 : 0;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA, info1);
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG, value);
he_stbc = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC, info1);
ppdu_info->is_stbc = he_stbc;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_STBC, he_stbc);
/* data4 */
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE, info0);
ppdu_info->he_data4 =
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE, value);
/* data5 */
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW, info0);
ppdu_info->bw = value;
ppdu_info->he_data5 =
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE, info0);
switch (value) {
case 0:
he_gi = HE_GI_0_8;
he_ltf = HE_LTF_1_X;
break;
case 1:
he_gi = HE_GI_0_8;
he_ltf = HE_LTF_2_X;
break;
case 2:
he_gi = HE_GI_1_6;
he_ltf = HE_LTF_2_X;
break;
case 3:
if (he_dcm && he_stbc) {
he_gi = HE_GI_0_8;
he_ltf = HE_LTF_4_X;
} else {
he_gi = HE_GI_3_2;
he_ltf = HE_LTF_4_X;
}
break;
}
ppdu_info->gi = he_gi;
he_gi = (he_gi != 0) ? he_gi - 1 : 0;
ppdu_info->he_data5 |= FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_GI, he_gi);
ppdu_info->ltf_size = he_ltf;
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE,
(he_ltf == HE_LTF_4_X) ? he_ltf - 1 : he_ltf);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR, info1);
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF, info1);
ppdu_info->beamformed = value;
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_TXBF, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM, info1);
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG, value);
/* data6 */
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
value++;
ppdu_info->nss = value;
ppdu_info->he_data6 =
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_NSTS, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND, info1);
ppdu_info->he_data6 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_DOPPLER, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION, info1);
ppdu_info->he_data6 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_TXOP, value);
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
break;
}
case HAL_PHYRX_HE_SIG_A_MU_DL: {
struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl =
(struct hal_rx_he_sig_a_mu_dl_info *)tlv_data;
info0 = __le32_to_cpu(he_sig_a_mu_dl->info0);
info1 = __le32_to_cpu(he_sig_a_mu_dl->info1);
ppdu_info->he_mu_flags = 1;
ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU;
ppdu_info->he_data1 |=
IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN;
ppdu_info->he_data2 =
IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN;
/*data3*/
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_BSS_COLOR, info0);
ppdu_info->he_data3 =
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_UL_FLAG, info0);
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_UL_DL, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_LDPC_EXTRA, info1);
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC, info1);
he_stbc = value;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_STBC, value);
/*data4*/
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_SPATIAL_REUSE, info0);
ppdu_info->he_data4 =
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE, value);
/*data5*/
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW, info0);
ppdu_info->bw = value;
ppdu_info->he_data5 =
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE, info0);
switch (value) {
case 0:
he_gi = HE_GI_0_8;
he_ltf = HE_LTF_4_X;
break;
case 1:
he_gi = HE_GI_0_8;
he_ltf = HE_LTF_2_X;
break;
case 2:
he_gi = HE_GI_1_6;
he_ltf = HE_LTF_2_X;
break;
case 3:
he_gi = HE_GI_3_2;
he_ltf = HE_LTF_4_X;
break;
}
ppdu_info->gi = he_gi;
he_gi = (he_gi != 0) ? he_gi - 1 : 0;
ppdu_info->he_data5 |= FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_GI, he_gi);
ppdu_info->ltf_size = he_ltf;
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE,
(he_ltf == HE_LTF_4_X) ? he_ltf - 1 : he_ltf);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_NUM_LTF_SYMB, info1);
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_FACTOR,
info1);
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_PE_DISAM,
info1);
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG, value);
/*data6*/
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DOPPLER_INDICATION,
info0);
ppdu_info->he_data6 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_DOPPLER, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXOP_DURATION, info1);
ppdu_info->he_data6 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_TXOP, value);
/* HE-MU Flags */
/* HE-MU-flags1 */
ppdu_info->he_flags1 =
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN;
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_MCS_OF_SIGB, info0);
ppdu_info->he_flags1 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN,
value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DCM_OF_SIGB, info0);
ppdu_info->he_flags1 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN,
value);
/* HE-MU-flags2 */
ppdu_info->he_flags2 =
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN;
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW, info0);
ppdu_info->he_flags2 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW,
value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_COMP_MODE_SIGB, info0);
ppdu_info->he_flags2 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_NUM_SIGB_SYMB, info0);
value = value - 1;
ppdu_info->he_flags2 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS,
value);
ppdu_info->is_stbc = info1 &
HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
break;
}
case HAL_PHYRX_HE_SIG_B1_MU: {
struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu =
(struct hal_rx_he_sig_b1_mu_info *)tlv_data;
u16 ru_tones;
info0 = __le32_to_cpu(he_sig_b1_mu->info0);
ru_tones = FIELD_GET(HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION,
info0);
ppdu_info->ru_alloc =
ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(ru_tones);
ppdu_info->he_RU[0] = ru_tones;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
break;
}
case HAL_PHYRX_HE_SIG_B2_MU: {
struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu =
(struct hal_rx_he_sig_b2_mu_info *)tlv_data;
info0 = __le32_to_cpu(he_sig_b2_mu->info0);
ppdu_info->he_data1 |= IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN;
ppdu_info->mcs =
FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS, info0);
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, ppdu_info->mcs);
value = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING, info0);
ppdu_info->ldpc = value;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
value = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID, info0);
ppdu_info->he_data4 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID, value);
ppdu_info->nss =
FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS, info0) + 1;
break;
}
case HAL_PHYRX_HE_SIG_B2_OFDMA: {
struct hal_rx_he_sig_b2_ofdma_info *he_sig_b2_ofdma =
(struct hal_rx_he_sig_b2_ofdma_info *)tlv_data;
info0 = __le32_to_cpu(he_sig_b2_ofdma->info0);
ppdu_info->he_data1 |=
IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN;
/* HE-data2 */
ppdu_info->he_data2 |= IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN;
ppdu_info->mcs =
FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS,
info0);
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, ppdu_info->mcs);
value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM, info0);
he_dcm = value;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM, value);
value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING, info0);
ppdu_info->ldpc = value;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
/* HE-data4 */
value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID, info0);
ppdu_info->he_data4 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID, value);
ppdu_info->nss =
FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS,
info0) + 1;
ppdu_info->beamformed =
info0 & HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
break;
}
case HAL_PHYRX_RSSI_LEGACY: {
int i;
bool db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
ab->wmi_ab.svc_map);
struct hal_rx_phyrx_rssi_legacy_info *rssi =
(struct hal_rx_phyrx_rssi_legacy_info *)tlv_data;
/* TODO: Note that the combined RSSI will not be accurate in the MU
 * case; the per-user RSSI needs to be retrieved from the
 * PHYRX_OTHER_RECEIVE_INFO TLV.
 */
ppdu_info->rssi_comb =
FIELD_GET(HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB,
__le32_to_cpu(rssi->info0));
if (db2dbm) {
for (i = 0; i < ARRAY_SIZE(rssi->preamble); i++) {
ppdu_info->rssi_chain_pri20[i] =
le32_get_bits(rssi->preamble[i].rssi_2040,
HAL_RX_PHYRX_RSSI_PREAMBLE_PRI20);
}
}
break;
}
case HAL_RX_MPDU_START: {
struct hal_rx_mpdu_info *mpdu_info =
(struct hal_rx_mpdu_info *)tlv_data;
u16 peer_id;
peer_id = ath11k_hal_rx_mpduinfo_get_peerid(ab, mpdu_info);
if (peer_id)
ppdu_info->peer_id = peer_id;
break;
}
case HAL_RXPCU_PPDU_END_INFO: {
struct hal_rx_ppdu_end_duration *ppdu_rx_duration =
(struct hal_rx_ppdu_end_duration *)tlv_data;
ppdu_info->rx_duration =
FIELD_GET(HAL_RX_PPDU_END_DURATION,
__le32_to_cpu(ppdu_rx_duration->info0));
ppdu_info->tsft = __le32_to_cpu(ppdu_rx_duration->rsvd0[1]);
ppdu_info->tsft = (ppdu_info->tsft << 32) |
__le32_to_cpu(ppdu_rx_duration->rsvd0[0]);
break;
}
case HAL_DUMMY:
return HAL_RX_MON_STATUS_BUF_DONE;
case HAL_RX_PPDU_END_STATUS_DONE:
case 0:
return HAL_RX_MON_STATUS_PPDU_DONE;
default:
break;
}
return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
}
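/* Walk all TLVs in a monitor status buffer, feeding each to the per-TLV
 * parser, until a PPDU boundary is signalled or the buffer is exhausted.
 */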
enum hal_rx_mon_status
ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
struct hal_rx_mon_ppdu_info *ppdu_info,
struct sk_buff *skb)
{
struct hal_tlv_hdr *tlv;
enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
u16 tlv_tag;
u16 tlv_len;
u32 tlv_userid = 0;
u8 *ptr = skb->data;
do {
tlv = (struct hal_tlv_hdr *)ptr;
tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl);
tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
tlv_userid = FIELD_GET(HAL_TLV_USR_ID, tlv->tl);
ptr += sizeof(*tlv);
/* The actual length of PPDU_END is the combined length of many PHY
 * TLVs that follow. Skip the TLV header and the
 * rx_rxpcu_classification_overview that follows it to get to the
 * next TLV.
 */
if (tlv_tag == HAL_RX_PPDU_END)
tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
hal_status = ath11k_hal_rx_parse_mon_status_tlv(ab, ppdu_info,
tlv_tag, ptr, tlv_userid);
ptr += tlv_len;
ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE)
break;
} while (hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE);
return hal_status;
}
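/* Extract the buffer DMA address, SW cookie, return buffer manager and
 * MSDU count from a REO entrance ring descriptor, and hand back a
 * pointer to the embedded buffer address info for reuse by the caller.
 */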
void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
u32 *sw_cookie, void **pp_buf_addr,
u8 *rbm, u32 *msdu_cnt)
{
struct hal_reo_entrance_ring *reo_ent_ring =
(struct hal_reo_entrance_ring *)rx_desc;
struct ath11k_buffer_addr *buf_addr_info;
struct rx_mpdu_desc *rx_mpdu_desc_info_details;
rx_mpdu_desc_info_details =
(struct rx_mpdu_desc *)&reo_ent_ring->rx_mpdu_info;
*msdu_cnt = FIELD_GET(RX_MPDU_DESC_INFO0_MSDU_COUNT,
rx_mpdu_desc_info_details->info0);
buf_addr_info = (struct ath11k_buffer_addr *)&reo_ent_ring->buf_addr_info;
*paddr = (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
buf_addr_info->info1)) << 32) |
FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
buf_addr_info->info0);
*sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
buf_addr_info->info1);
*rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
buf_addr_info->info1);
*pp_buf_addr = (void *)buf_addr_info;
}
void
ath11k_hal_rx_sw_mon_ring_buf_paddr_get(void *rx_desc,
struct hal_sw_mon_ring_entries *sw_mon_entries)
{
struct hal_sw_monitor_ring *sw_mon_ring = rx_desc;
struct ath11k_buffer_addr *buf_addr_info;
struct ath11k_buffer_addr *status_buf_addr_info;
struct rx_mpdu_desc *rx_mpdu_desc_info_details;
rx_mpdu_desc_info_details = &sw_mon_ring->rx_mpdu_info;
sw_mon_entries->msdu_cnt = FIELD_GET(RX_MPDU_DESC_INFO0_MSDU_COUNT,
rx_mpdu_desc_info_details->info0);
buf_addr_info = &sw_mon_ring->buf_addr_info;
status_buf_addr_info = &sw_mon_ring->status_buf_addr_info;
sw_mon_entries->mon_dst_paddr = (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
buf_addr_info->info1)) << 32) |
FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
buf_addr_info->info0);
sw_mon_entries->mon_status_paddr =
(((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
status_buf_addr_info->info1)) << 32) |
FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
status_buf_addr_info->info0);
sw_mon_entries->mon_dst_sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
buf_addr_info->info1);
sw_mon_entries->mon_status_sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
status_buf_addr_info->info1);
sw_mon_entries->status_buf_count = FIELD_GET(HAL_SW_MON_RING_INFO0_STATUS_BUF_CNT,
sw_mon_ring->info0);
sw_mon_entries->dst_buf_addr_info = buf_addr_info;
sw_mon_entries->status_buf_addr_info = status_buf_addr_info;
sw_mon_entries->ppdu_id =
FIELD_GET(HAL_SW_MON_RING_INFO1_PHY_PPDU_ID, sw_mon_ring->info1);
}
|
linux-master
|
drivers/net/wireless/ath/ath11k/hal_rx.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
*/
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/thermal.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include "core.h"
#include "debug.h"
static int
ath11k_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
*state = ATH11K_THERMAL_THROTTLE_MAX;
return 0;
}
static int
ath11k_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
struct ath11k *ar = cdev->devdata;
mutex_lock(&ar->conf_mutex);
*state = ar->thermal.throttle_state;
mutex_unlock(&ar->conf_mutex);
return 0;
}
static int
ath11k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
unsigned long throttle_state)
{
struct ath11k *ar = cdev->devdata;
int ret;
if (throttle_state > ATH11K_THERMAL_THROTTLE_MAX) {
ath11k_warn(ar->ab, "throttle state %ld is exceeding the limit %d\n",
throttle_state, ATH11K_THERMAL_THROTTLE_MAX);
return -EINVAL;
}
mutex_lock(&ar->conf_mutex);
ret = ath11k_thermal_set_throttling(ar, throttle_state);
if (ret == 0)
ar->thermal.throttle_state = throttle_state;
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct thermal_cooling_device_ops ath11k_thermal_ops = {
.get_max_state = ath11k_thermal_get_max_throttle_state,
.get_cur_state = ath11k_thermal_get_cur_throttle_state,
.set_cur_state = ath11k_thermal_set_cur_throttle_state,
};
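/* hwmon temp1_input handler: request a firmware temperature read over
 * WMI, wait for the completion posted by the event handler, and report
 * the cached value in millidegrees Celsius.
 */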
static ssize_t ath11k_thermal_show_temp(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ath11k *ar = dev_get_drvdata(dev);
int ret, temperature;
unsigned long time_left;
mutex_lock(&ar->conf_mutex);
/* Can't get temperature when the card is off */
if (ar->state != ATH11K_STATE_ON) {
ret = -ENETDOWN;
goto out;
}
reinit_completion(&ar->thermal.wmi_sync);
ret = ath11k_wmi_send_pdev_temperature_cmd(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to read temperature %d\n", ret);
goto out;
}
if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) {
ret = -ESHUTDOWN;
goto out;
}
time_left = wait_for_completion_timeout(&ar->thermal.wmi_sync,
ATH11K_THERMAL_SYNC_TIMEOUT_HZ);
if (!time_left) {
ath11k_warn(ar->ab, "failed to synchronize thermal read\n");
ret = -ETIMEDOUT;
goto out;
}
spin_lock_bh(&ar->data_lock);
temperature = ar->thermal.temperature;
spin_unlock_bh(&ar->data_lock);
/* display in millidegrees Celsius */
ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
void ath11k_thermal_event_temperature(struct ath11k *ar, int temperature)
{
spin_lock_bh(&ar->data_lock);
ar->thermal.temperature = temperature;
spin_unlock_bh(&ar->data_lock);
complete(&ar->thermal.wmi_sync);
}
static SENSOR_DEVICE_ATTR(temp1_input, 0444, ath11k_thermal_show_temp,
NULL, 0);
static struct attribute *ath11k_hwmon_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(ath11k_hwmon);
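/* Push the throttle state to firmware as a thermal mitigation duty
 * cycle, using a single level configured between the low and high
 * temperature marks. A no-op while the radio is not started.
 */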
int ath11k_thermal_set_throttling(struct ath11k *ar, u32 throttle_state)
{
struct ath11k_base *sc = ar->ab;
struct thermal_mitigation_params param;
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON)
return 0;
memset(&param, 0, sizeof(param));
param.pdev_id = ar->pdev->pdev_id;
param.enable = throttle_state ? 1 : 0;
param.dc = ATH11K_THERMAL_DEFAULT_DUTY_CYCLE;
param.dc_per_event = 0xFFFFFFFF;
param.levelconf[0].tmplwm = ATH11K_THERMAL_TEMP_LOW_MARK;
param.levelconf[0].tmphwm = ATH11K_THERMAL_TEMP_HIGH_MARK;
param.levelconf[0].dcoffpercent = throttle_state;
param.levelconf[0].priority = 0; /* disable all data tx queues */
ret = ath11k_wmi_send_thermal_mitigation_param_cmd(ar, &param);
if (ret) {
ath11k_warn(sc, "failed to send thermal mitigation duty cycle %u ret %d\n",
throttle_state, ret);
}
return ret;
}
int ath11k_thermal_register(struct ath11k_base *sc)
{
struct thermal_cooling_device *cdev;
struct device *hwmon_dev;
struct ath11k *ar;
struct ath11k_pdev *pdev;
int i, ret;
for (i = 0; i < sc->num_radios; i++) {
pdev = &sc->pdevs[i];
ar = pdev->ar;
if (!ar)
continue;
cdev = thermal_cooling_device_register("ath11k_thermal", ar,
&ath11k_thermal_ops);
if (IS_ERR(cdev)) {
ath11k_err(sc, "failed to setup thermal device result: %ld\n",
PTR_ERR(cdev));
ret = -EINVAL;
goto err_thermal_destroy;
}
ar->thermal.cdev = cdev;
ret = sysfs_create_link(&ar->hw->wiphy->dev.kobj, &cdev->device.kobj,
"cooling_device");
if (ret) {
ath11k_err(sc, "failed to create cooling device symlink\n");
goto err_thermal_destroy;
}
if (!IS_REACHABLE(CONFIG_HWMON))
return 0;
hwmon_dev = devm_hwmon_device_register_with_groups(&ar->hw->wiphy->dev,
"ath11k_hwmon", ar,
ath11k_hwmon_groups);
if (IS_ERR(hwmon_dev)) {
ath11k_err(ar->ab, "failed to register hwmon device: %ld\n",
PTR_ERR(hwmon_dev));
ret = -EINVAL;
goto err_thermal_destroy;
}
}
return 0;
err_thermal_destroy:
ath11k_thermal_unregister(sc);
return ret;
}
void ath11k_thermal_unregister(struct ath11k_base *sc)
{
struct ath11k *ar;
struct ath11k_pdev *pdev;
int i;
for (i = 0; i < sc->num_radios; i++) {
pdev = &sc->pdevs[i];
ar = pdev->ar;
if (!ar)
continue;
sysfs_remove_link(&ar->hw->wiphy->dev.kobj, "cooling_device");
thermal_cooling_device_unregister(ar->thermal.cdev);
}
}
|
linux-master
|
drivers/net/wireless/ath/ath11k/thermal.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
#include "core.h"
#include "dp_tx.h"
#include "dp_rx.h"
#include "debug.h"
#include "debugfs_htt_stats.h"
#define HTT_MAX_PRINT_CHAR_PER_ELEM 15
#define HTT_TLV_HDR_LEN 4
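/* Append "str = 0:v0, 1:v1, ..." for an array to the stats buffer,
 * bounded by ATH11K_HTT_STATS_BUF_SIZE, followed by the given newline
 * string.
 */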
#define PRINT_ARRAY_TO_BUF(out, buflen, arr, str, len, newline) \
do { \
int index = 0; u8 i; const char *str_val = str; \
const char *new_line = newline; \
if (str_val) { \
index += scnprintf((out + buflen), \
(ATH11K_HTT_STATS_BUF_SIZE - buflen), \
"%s = ", str_val); \
} \
for (i = 0; i < len; i++) { \
index += scnprintf((out + buflen) + index, \
(ATH11K_HTT_STATS_BUF_SIZE - buflen) - index, \
" %u:%u,", i, arr[i]); \
} \
index += scnprintf((out + buflen) + index, \
(ATH11K_HTT_STATS_BUF_SIZE - buflen) - index, \
"%s", new_line); \
buflen += index; \
} while (0)
static inline void htt_print_stats_string_tlv(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_stats_string_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u8 i;
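/* tag_len is in bytes; the string data is handled as 32-bit words */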
tag_len = tag_len >> 2;
len += scnprintf(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:\n");
len += scnprintf(buf + len, buf_len - len,
"data = ");
for (i = 0; i < tag_len; i++) {
len += scnprintf(buf + len,
buf_len - len,
"%.*s", 4, (char *)&(htt_stats_buf->data[i]));
}
/* New lines are added for better display */
len += scnprintf(buf + len, buf_len - len, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_pdev_stats_cmn_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n",
htt_stats_buf->hw_queued);
len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n",
htt_stats_buf->hw_reaped);
len += scnprintf(buf + len, buf_len - len, "underrun = %u\n",
htt_stats_buf->underrun);
len += scnprintf(buf + len, buf_len - len, "hw_paused = %u\n",
htt_stats_buf->hw_paused);
len += scnprintf(buf + len, buf_len - len, "hw_flush = %u\n",
htt_stats_buf->hw_flush);
len += scnprintf(buf + len, buf_len - len, "hw_filt = %u\n",
htt_stats_buf->hw_filt);
len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
htt_stats_buf->tx_abort);
len += scnprintf(buf + len, buf_len - len, "mpdu_requeued = %u\n",
htt_stats_buf->mpdu_requeued);
len += scnprintf(buf + len, buf_len - len, "tx_xretry = %u\n",
htt_stats_buf->tx_xretry);
len += scnprintf(buf + len, buf_len - len, "data_rc = %u\n",
htt_stats_buf->data_rc);
len += scnprintf(buf + len, buf_len - len, "mpdu_dropped_xretry = %u\n",
htt_stats_buf->mpdu_dropped_xretry);
len += scnprintf(buf + len, buf_len - len, "illegal_rate_phy_err = %u\n",
htt_stats_buf->illgl_rate_phy_err);
len += scnprintf(buf + len, buf_len - len, "cont_xretry = %u\n",
htt_stats_buf->cont_xretry);
len += scnprintf(buf + len, buf_len - len, "tx_timeout = %u\n",
htt_stats_buf->tx_timeout);
len += scnprintf(buf + len, buf_len - len, "pdev_resets = %u\n",
htt_stats_buf->pdev_resets);
len += scnprintf(buf + len, buf_len - len, "phy_underrun = %u\n",
htt_stats_buf->phy_underrun);
len += scnprintf(buf + len, buf_len - len, "txop_ovf = %u\n",
htt_stats_buf->txop_ovf);
len += scnprintf(buf + len, buf_len - len, "seq_posted = %u\n",
htt_stats_buf->seq_posted);
len += scnprintf(buf + len, buf_len - len, "seq_failed_queueing = %u\n",
htt_stats_buf->seq_failed_queueing);
len += scnprintf(buf + len, buf_len - len, "seq_completed = %u\n",
htt_stats_buf->seq_completed);
len += scnprintf(buf + len, buf_len - len, "seq_restarted = %u\n",
htt_stats_buf->seq_restarted);
len += scnprintf(buf + len, buf_len - len, "mu_seq_posted = %u\n",
htt_stats_buf->mu_seq_posted);
len += scnprintf(buf + len, buf_len - len, "seq_switch_hw_paused = %u\n",
htt_stats_buf->seq_switch_hw_paused);
len += scnprintf(buf + len, buf_len - len, "next_seq_posted_dsr = %u\n",
htt_stats_buf->next_seq_posted_dsr);
len += scnprintf(buf + len, buf_len - len, "seq_posted_isr = %u\n",
htt_stats_buf->seq_posted_isr);
len += scnprintf(buf + len, buf_len - len, "seq_ctrl_cached = %u\n",
htt_stats_buf->seq_ctrl_cached);
len += scnprintf(buf + len, buf_len - len, "mpdu_count_tqm = %u\n",
htt_stats_buf->mpdu_count_tqm);
len += scnprintf(buf + len, buf_len - len, "msdu_count_tqm = %u\n",
htt_stats_buf->msdu_count_tqm);
len += scnprintf(buf + len, buf_len - len, "mpdu_removed_tqm = %u\n",
htt_stats_buf->mpdu_removed_tqm);
len += scnprintf(buf + len, buf_len - len, "msdu_removed_tqm = %u\n",
htt_stats_buf->msdu_removed_tqm);
len += scnprintf(buf + len, buf_len - len, "mpdus_sw_flush = %u\n",
htt_stats_buf->mpdus_sw_flush);
len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n",
htt_stats_buf->mpdus_hw_filter);
len += scnprintf(buf + len, buf_len - len, "mpdus_truncated = %u\n",
htt_stats_buf->mpdus_truncated);
len += scnprintf(buf + len, buf_len - len, "mpdus_ack_failed = %u\n",
htt_stats_buf->mpdus_ack_failed);
len += scnprintf(buf + len, buf_len - len, "mpdus_expired = %u\n",
htt_stats_buf->mpdus_expired);
len += scnprintf(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u\n",
htt_stats_buf->mpdus_seq_hw_retry);
len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
htt_stats_buf->ack_tlv_proc);
len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u\n",
htt_stats_buf->coex_abort_mpdu_cnt_valid);
len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u\n",
htt_stats_buf->coex_abort_mpdu_cnt);
len += scnprintf(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u\n",
htt_stats_buf->num_total_ppdus_tried_ota);
len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u\n",
htt_stats_buf->num_data_ppdus_tried_ota);
len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u\n",
htt_stats_buf->local_ctrl_mgmt_enqued);
len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u\n",
htt_stats_buf->local_ctrl_mgmt_freed);
len += scnprintf(buf + len, buf_len - len, "local_data_enqued = %u\n",
htt_stats_buf->local_data_enqued);
len += scnprintf(buf + len, buf_len - len, "local_data_freed = %u\n",
htt_stats_buf->local_data_freed);
len += scnprintf(buf + len, buf_len - len, "mpdu_tried = %u\n",
htt_stats_buf->mpdu_tried);
len += scnprintf(buf + len, buf_len - len, "isr_wait_seq_posted = %u\n",
htt_stats_buf->isr_wait_seq_posted);
len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_low = %u\n",
htt_stats_buf->tx_active_dur_us_low);
len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n\n",
htt_stats_buf->tx_active_dur_us_high);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_pdev_stats_urrn_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_pdev_stats_urrn_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_URRN_STATS);
len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_URRN_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->urrn_stats, "urrn_stats",
num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_pdev_stats_flush_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_pdev_stats_flush_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_FLUSH_REASON_STATS);
len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_FLUSH_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->flush_errs, "flush_errs",
num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_pdev_stats_sifs_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_pdev_stats_sifs_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_STATS);
len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_SIFS_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sifs_status, "sifs_status",
num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_pdev_stats_phy_err_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_pdev_stats_phy_err_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_PHY_ERR_STATS);
len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_PHY_ERR_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->phy_errs, "phy_errs",
num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_pdev_stats_sifs_hist_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_pdev_stats_sifs_hist_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS);
len += scnprintf(buf + len, buf_len - len,
"HTT_TX_PDEV_STATS_SIFS_HIST_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sifs_hist_status,
"sifs_hist_status", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
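
/* Fixed-size TLV: no tag_len is passed; the struct layout alone defines
 * what gets printed.
 */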
static inline void
htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_pdev_stats_tx_ppdu_stats_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len,
"HTT_TX_PDEV_STATS_TX_PPDU_STATS_TLV_V:\n");
len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_legacy_su = %u\n",
htt_stats_buf->num_data_ppdus_legacy_su);
len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ac_su = %u\n",
htt_stats_buf->num_data_ppdus_ac_su);
len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ax_su = %u\n",
htt_stats_buf->num_data_ppdus_ax_su);
len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ac_su_txbf = %u\n",
htt_stats_buf->num_data_ppdus_ac_su_txbf);
len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ax_su_txbf = %u\n\n",
htt_stats_buf->num_data_ppdus_ax_su_txbf);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
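
/* This histogram TLV carries a leading hist_bin_size word; subtract it
 * from tag_len before converting bytes to u32 entries. Note there is no
 * clamp here, so the element count comes straight from the tag length.
 */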
static inline void
htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u32 num_elements = ((tag_len - sizeof(htt_stats_buf->hist_bin_size)) >> 2);
len += scnprintf(buf + len, buf_len - len,
"HTT_TX_PDEV_STATS_TRIED_MPDU_CNT_HIST_TLV_V:\n");
len += scnprintf(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u\n",
htt_stats_buf->hist_bin_size);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tried_mpdu_cnt_hist,
"tried_mpdu_cnt_hist", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
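
/* The firmware string may not be NUL terminated; copy it into a local
 * buffer that is one byte longer and zero initialized before printing
 * it with %s.
 */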
static inline void htt_print_hw_stats_intr_misc_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_hw_stats_intr_misc_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
char hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN + 1] = {0};
len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:\n");
memcpy(hw_intr_name, &(htt_stats_buf->hw_intr_name[0]),
HTT_STATS_MAX_HW_INTR_NAME_LEN);
len += scnprintf(buf + len, buf_len - len, "hw_intr_name = %s\n", hw_intr_name);
len += scnprintf(buf + len, buf_len - len, "mask = %u\n",
htt_stats_buf->mask);
len += scnprintf(buf + len, buf_len - len, "count = %u\n\n",
htt_stats_buf->count);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_hw_stats_wd_timeout_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_hw_stats_wd_timeout_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
char hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN + 1] = {0};
len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WD_TIMEOUT_TLV:\n");
memcpy(hw_module_name, &(htt_stats_buf->hw_module_name[0]),
HTT_STATS_MAX_HW_MODULE_NAME_LEN);
len += scnprintf(buf + len, buf_len - len, "hw_module_name = %s\n",
hw_module_name);
len += scnprintf(buf + len, buf_len - len, "count = %u\n",
htt_stats_buf->count);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_hw_stats_pdev_errs_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_hw_stats_pdev_errs_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
htt_stats_buf->tx_abort);
len += scnprintf(buf + len, buf_len - len, "tx_abort_fail_count = %u\n",
htt_stats_buf->tx_abort_fail_count);
len += scnprintf(buf + len, buf_len - len, "rx_abort = %u\n",
htt_stats_buf->rx_abort);
len += scnprintf(buf + len, buf_len - len, "rx_abort_fail_count = %u\n",
htt_stats_buf->rx_abort_fail_count);
len += scnprintf(buf + len, buf_len - len, "warm_reset = %u\n",
htt_stats_buf->warm_reset);
len += scnprintf(buf + len, buf_len - len, "cold_reset = %u\n",
htt_stats_buf->cold_reset);
len += scnprintf(buf + len, buf_len - len, "tx_flush = %u\n",
htt_stats_buf->tx_flush);
len += scnprintf(buf + len, buf_len - len, "tx_glb_reset = %u\n",
htt_stats_buf->tx_glb_reset);
len += scnprintf(buf + len, buf_len - len, "tx_txq_reset = %u\n",
htt_stats_buf->tx_txq_reset);
len += scnprintf(buf + len, buf_len - len, "rx_timeout_reset = %u\n\n",
htt_stats_buf->rx_timeout_reset);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
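
/* Several subfields share one u32 here; FIELD_GET() extracts each one,
 * and %lu matches the unsigned long that FIELD_GET() yields for a
 * GENMASK() mask.
 */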
static inline void htt_print_msdu_flow_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_msdu_flow_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_MSDU_FLOW_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "last_update_timestamp = %u\n",
htt_stats_buf->last_update_timestamp);
len += scnprintf(buf + len, buf_len - len, "last_add_timestamp = %u\n",
htt_stats_buf->last_add_timestamp);
len += scnprintf(buf + len, buf_len - len, "last_remove_timestamp = %u\n",
htt_stats_buf->last_remove_timestamp);
len += scnprintf(buf + len, buf_len - len, "total_processed_msdu_count = %u\n",
htt_stats_buf->total_processed_msdu_count);
len += scnprintf(buf + len, buf_len - len, "cur_msdu_count_in_flowq = %u\n",
htt_stats_buf->cur_msdu_count_in_flowq);
len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
htt_stats_buf->sw_peer_id);
len += scnprintf(buf + len, buf_len - len, "tx_flow_no = %lu\n",
FIELD_GET(HTT_MSDU_FLOW_STATS_TX_FLOW_NO,
htt_stats_buf->tx_flow_no__tid_num__drop_rule));
len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
FIELD_GET(HTT_MSDU_FLOW_STATS_TID_NUM,
htt_stats_buf->tx_flow_no__tid_num__drop_rule));
len += scnprintf(buf + len, buf_len - len, "drop_rule = %lu\n",
FIELD_GET(HTT_MSDU_FLOW_STATS_DROP_RULE,
htt_stats_buf->tx_flow_no__tid_num__drop_rule));
len += scnprintf(buf + len, buf_len - len, "last_cycle_enqueue_count = %u\n",
htt_stats_buf->last_cycle_enqueue_count);
len += scnprintf(buf + len, buf_len - len, "last_cycle_dequeue_count = %u\n",
htt_stats_buf->last_cycle_dequeue_count);
len += scnprintf(buf + len, buf_len - len, "last_cycle_drop_count = %u\n",
htt_stats_buf->last_cycle_drop_count);
len += scnprintf(buf + len, buf_len - len, "current_drop_th = %u\n\n",
htt_stats_buf->current_drop_th);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
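
/* Per-TID transmit state: the packed id words are unpacked with
 * FIELD_GET() and the TID name is bounced through a NUL-terminated
 * local copy, as with the hardware name strings above.
 */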
static inline void htt_print_tx_tid_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_tid_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
char tid_name[MAX_HTT_TID_NAME + 1] = {0};
len += scnprintf(buf + len, buf_len - len, "HTT_TX_TID_STATS_TLV:\n");
memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name);
len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n",
FIELD_GET(HTT_TX_TID_STATS_SW_PEER_ID,
htt_stats_buf->sw_peer_id__tid_num));
len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
FIELD_GET(HTT_TX_TID_STATS_TID_NUM,
htt_stats_buf->sw_peer_id__tid_num));
len += scnprintf(buf + len, buf_len - len, "num_sched_pending = %lu\n",
FIELD_GET(HTT_TX_TID_STATS_NUM_SCHED_PENDING,
htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
len += scnprintf(buf + len, buf_len - len, "num_ppdu_in_hwq = %lu\n",
FIELD_GET(HTT_TX_TID_STATS_NUM_PPDU_IN_HWQ,
htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
len += scnprintf(buf + len, buf_len - len, "tid_flags = 0x%x\n",
htt_stats_buf->tid_flags);
len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n",
htt_stats_buf->hw_queued);
len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n",
htt_stats_buf->hw_reaped);
len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n",
htt_stats_buf->mpdus_hw_filter);
len += scnprintf(buf + len, buf_len - len, "qdepth_bytes = %u\n",
htt_stats_buf->qdepth_bytes);
len += scnprintf(buf + len, buf_len - len, "qdepth_num_msdu = %u\n",
htt_stats_buf->qdepth_num_msdu);
len += scnprintf(buf + len, buf_len - len, "qdepth_num_mpdu = %u\n",
htt_stats_buf->qdepth_num_mpdu);
len += scnprintf(buf + len, buf_len - len, "last_scheduled_tsmp = %u\n",
htt_stats_buf->last_scheduled_tsmp);
len += scnprintf(buf + len, buf_len - len, "pause_module_id = %u\n",
htt_stats_buf->pause_module_id);
len += scnprintf(buf + len, buf_len - len, "block_module_id = %u\n\n",
htt_stats_buf->block_module_id);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_tx_tid_stats_v1_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_tid_stats_v1_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
char tid_name[MAX_HTT_TID_NAME + 1] = {0};
len += scnprintf(buf + len, buf_len - len, "HTT_TX_TID_STATS_V1_TLV:\n");
memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name);
len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n",
FIELD_GET(HTT_TX_TID_STATS_V1_SW_PEER_ID,
htt_stats_buf->sw_peer_id__tid_num));
len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
FIELD_GET(HTT_TX_TID_STATS_V1_TID_NUM,
htt_stats_buf->sw_peer_id__tid_num));
len += scnprintf(buf + len, buf_len - len, "num_sched_pending = %lu\n",
FIELD_GET(HTT_TX_TID_STATS_V1_NUM_SCHED_PENDING,
htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
len += scnprintf(buf + len, buf_len - len, "num_ppdu_in_hwq = %lu\n",
FIELD_GET(HTT_TX_TID_STATS_V1_NUM_PPDU_IN_HWQ,
htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
len += scnprintf(buf + len, buf_len - len, "tid_flags = 0x%x\n",
htt_stats_buf->tid_flags);
len += scnprintf(buf + len, buf_len - len, "max_qdepth_bytes = %u\n",
htt_stats_buf->max_qdepth_bytes);
len += scnprintf(buf + len, buf_len - len, "max_qdepth_n_msdus = %u\n",
htt_stats_buf->max_qdepth_n_msdus);
len += scnprintf(buf + len, buf_len - len, "rsvd = %u\n",
htt_stats_buf->rsvd);
len += scnprintf(buf + len, buf_len - len, "qdepth_bytes = %u\n",
htt_stats_buf->qdepth_bytes);
len += scnprintf(buf + len, buf_len - len, "qdepth_num_msdu = %u\n",
htt_stats_buf->qdepth_num_msdu);
len += scnprintf(buf + len, buf_len - len, "qdepth_num_mpdu = %u\n",
htt_stats_buf->qdepth_num_mpdu);
len += scnprintf(buf + len, buf_len - len, "last_scheduled_tsmp = %u\n",
htt_stats_buf->last_scheduled_tsmp);
len += scnprintf(buf + len, buf_len - len, "pause_module_id = %u\n",
htt_stats_buf->pause_module_id);
len += scnprintf(buf + len, buf_len - len, "block_module_id = %u\n",
htt_stats_buf->block_module_id);
len += scnprintf(buf + len, buf_len - len, "allow_n_flags = 0x%x\n",
htt_stats_buf->allow_n_flags);
len += scnprintf(buf + len, buf_len - len, "sendn_frms_allowed = %u\n\n",
htt_stats_buf->sendn_frms_allowed);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_rx_tid_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_rx_tid_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
char tid_name[MAX_HTT_TID_NAME + 1] = {0};
len += scnprintf(buf + len, buf_len - len, "HTT_RX_TID_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n",
FIELD_GET(HTT_RX_TID_STATS_SW_PEER_ID,
htt_stats_buf->sw_peer_id__tid_num));
len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
FIELD_GET(HTT_RX_TID_STATS_TID_NUM,
htt_stats_buf->sw_peer_id__tid_num));
memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name);
len += scnprintf(buf + len, buf_len - len, "dup_in_reorder = %u\n",
htt_stats_buf->dup_in_reorder);
len += scnprintf(buf + len, buf_len - len, "dup_past_outside_window = %u\n",
htt_stats_buf->dup_past_outside_window);
len += scnprintf(buf + len, buf_len - len, "dup_past_within_window = %u\n",
htt_stats_buf->dup_past_within_window);
len += scnprintf(buf + len, buf_len - len, "rxdesc_err_decrypt = %u\n\n",
htt_stats_buf->rxdesc_err_decrypt);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_counter_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_counter_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_COUNTER_TLV:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->counter_name,
"counter_name",
HTT_MAX_COUNTER_NAME, "\n");
len += scnprintf(buf + len, buf_len - len, "count = %u\n\n",
htt_stats_buf->count);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
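
/* Several peer counters are 64-bit values split by the firmware into
 * low/high u32 halves; reassemble them before printing with %llu.
 */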
static inline void htt_print_peer_stats_cmn_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_peer_stats_cmn_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_PEER_STATS_CMN_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "ppdu_cnt = %u\n",
htt_stats_buf->ppdu_cnt);
len += scnprintf(buf + len, buf_len - len, "mpdu_cnt = %u\n",
htt_stats_buf->mpdu_cnt);
len += scnprintf(buf + len, buf_len - len, "msdu_cnt = %u\n",
htt_stats_buf->msdu_cnt);
len += scnprintf(buf + len, buf_len - len, "pause_bitmap = %u\n",
htt_stats_buf->pause_bitmap);
len += scnprintf(buf + len, buf_len - len, "block_bitmap = %u\n",
htt_stats_buf->block_bitmap);
len += scnprintf(buf + len, buf_len - len, "last_rssi = %d\n",
htt_stats_buf->rssi);
len += scnprintf(buf + len, buf_len - len, "enqueued_count = %llu\n",
htt_stats_buf->peer_enqueued_count_low |
((u64)htt_stats_buf->peer_enqueued_count_high << 32));
len += scnprintf(buf + len, buf_len - len, "dequeued_count = %llu\n",
htt_stats_buf->peer_dequeued_count_low |
((u64)htt_stats_buf->peer_dequeued_count_high << 32));
len += scnprintf(buf + len, buf_len - len, "dropped_count = %llu\n",
htt_stats_buf->peer_dropped_count_low |
((u64)htt_stats_buf->peer_dropped_count_high << 32));
len += scnprintf(buf + len, buf_len - len, "transmitted_ppdu_bytes = %llu\n",
htt_stats_buf->ppdu_transmitted_bytes_low |
((u64)htt_stats_buf->ppdu_transmitted_bytes_high << 32));
len += scnprintf(buf + len, buf_len - len, "ttl_removed_count = %u\n",
htt_stats_buf->peer_ttl_removed_count);
len += scnprintf(buf + len, buf_len - len, "inactive_time = %u\n\n",
htt_stats_buf->inactive_time);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
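
/* The MAC address arrives as two packed words (four octets in
 * mac_addr_l32, two in mac_addr_h16); each octet is pulled out with
 * FIELD_GET() and printed in the usual colon-separated form.
 */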
static inline void htt_print_peer_details_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_peer_details_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_PEER_DETAILS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "peer_type = %u\n",
htt_stats_buf->peer_type);
len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
htt_stats_buf->sw_peer_id);
len += scnprintf(buf + len, buf_len - len, "vdev_id = %lu\n",
FIELD_GET(HTT_PEER_DETAILS_VDEV_ID,
htt_stats_buf->vdev_pdev_ast_idx));
len += scnprintf(buf + len, buf_len - len, "pdev_id = %lu\n",
FIELD_GET(HTT_PEER_DETAILS_PDEV_ID,
htt_stats_buf->vdev_pdev_ast_idx));
len += scnprintf(buf + len, buf_len - len, "ast_idx = %lu\n",
FIELD_GET(HTT_PEER_DETAILS_AST_IDX,
htt_stats_buf->vdev_pdev_ast_idx));
len += scnprintf(buf + len, buf_len - len,
"mac_addr = %02lx:%02lx:%02lx:%02lx:%02lx:%02lx\n",
FIELD_GET(HTT_MAC_ADDR_L32_0,
htt_stats_buf->mac_addr.mac_addr_l32),
FIELD_GET(HTT_MAC_ADDR_L32_1,
htt_stats_buf->mac_addr.mac_addr_l32),
FIELD_GET(HTT_MAC_ADDR_L32_2,
htt_stats_buf->mac_addr.mac_addr_l32),
FIELD_GET(HTT_MAC_ADDR_L32_3,
htt_stats_buf->mac_addr.mac_addr_l32),
FIELD_GET(HTT_MAC_ADDR_H16_0,
htt_stats_buf->mac_addr.mac_addr_h16),
FIELD_GET(HTT_MAC_ADDR_H16_1,
htt_stats_buf->mac_addr.mac_addr_h16));
len += scnprintf(buf + len, buf_len - len, "peer_flags = 0x%x\n",
htt_stats_buf->peer_flags);
len += scnprintf(buf + len, buf_len - len, "qpeer_flags = 0x%x\n\n",
htt_stats_buf->qpeer_flags);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
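
/* Rate tables: PRINT_ARRAY_TO_BUF() with a NULL label emits only the
 * values, which lets the tx_gi rows print under the per-index header
 * written just before each call.
 */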
static inline void htt_print_tx_peer_rate_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_peer_rate_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u8 j;
len += scnprintf(buf + len, buf_len - len, "HTT_TX_PEER_RATE_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n",
htt_stats_buf->tx_ldpc);
len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
htt_stats_buf->rts_cnt);
len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n",
htt_stats_buf->ack_rssi);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mcs, "tx_mcs",
HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_su_mcs, "tx_su_mcs",
HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mu_mcs, "tx_mu_mcs",
HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_nss, "tx_nss",
HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_bw, "tx_bw",
HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_stbc, "tx_stbc",
HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_pream, "tx_pream",
HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
len += scnprintf(buf + len, buf_len - len,
"tx_gi[%u] = ", j);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_gi[j], NULL,
HTT_TX_PEER_STATS_NUM_MCS_COUNTERS, "\n");
}
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_dcm, "tx_dcm",
HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_rx_peer_rate_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_rx_peer_rate_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u8 j;
len += scnprintf(buf + len, buf_len - len, "HTT_RX_PEER_RATE_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "nsts = %u\n",
htt_stats_buf->nsts);
len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n",
htt_stats_buf->rx_ldpc);
len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
htt_stats_buf->rts_cnt);
len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n",
htt_stats_buf->rssi_mgmt);
len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n",
htt_stats_buf->rssi_data);
len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n",
htt_stats_buf->rssi_comb);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_mcs, "rx_mcs",
HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_nss, "rx_nss",
HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_dcm, "rx_dcm",
HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_stbc, "rx_stbc",
HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_bw, "rx_bw",
HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) {
len += scnprintf(buf + len, (buf_len - len),
"rssi_chain[%u] = ", j);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rssi_chain[j], NULL,
HTT_RX_PEER_STATS_NUM_BW_COUNTERS, "\n");
}
for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) {
len += scnprintf(buf + len, (buf_len - len),
"rx_gi[%u] = ", j);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_gi[j], NULL,
HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_pream, "rx_pream",
HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_hwq_mu_mimo_sch_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_hwq_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_SCH_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_posted = %u\n",
htt_stats_buf->mu_mimo_sch_posted);
len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n",
htt_stats_buf->mu_mimo_sch_failed);
len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n\n",
htt_stats_buf->mu_mimo_ppdu_posted);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_hwq_mu_mimo_mpdu_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len,
"HTT_TX_HWQ_MU_MIMO_MPDU_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_queued_usr = %u\n",
htt_stats_buf->mu_mimo_mpdus_queued_usr);
len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_tried_usr = %u\n",
htt_stats_buf->mu_mimo_mpdus_tried_usr);
len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_failed_usr = %u\n",
htt_stats_buf->mu_mimo_mpdus_failed_usr);
len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_requeued_usr = %u\n",
htt_stats_buf->mu_mimo_mpdus_requeued_usr);
len += scnprintf(buf + len, buf_len - len, "mu_mimo_err_no_ba_usr = %u\n",
htt_stats_buf->mu_mimo_err_no_ba_usr);
len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdu_underrun_usr = %u\n",
htt_stats_buf->mu_mimo_mpdu_underrun_usr);
len += scnprintf(buf + len, buf_len - len, "mu_mimo_ampdu_underrun_usr = %u\n\n",
htt_stats_buf->mu_mimo_ampdu_underrun_usr);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_hwq_mu_mimo_cmn_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_CMN_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
FIELD_GET(HTT_TX_HWQ_STATS_MAC_ID,
htt_stats_buf->mac_id__hwq_id__word));
len += scnprintf(buf + len, buf_len - len, "hwq_id = %lu\n\n",
FIELD_GET(HTT_TX_HWQ_STATS_HWQ_ID,
htt_stats_buf->mac_id__hwq_id__word));
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_hwq_stats_cmn_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_hwq_stats_cmn_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
/* TODO: HKDBG */
len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_STATS_CMN_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
FIELD_GET(HTT_TX_HWQ_STATS_MAC_ID,
htt_stats_buf->mac_id__hwq_id__word));
len += scnprintf(buf + len, buf_len - len, "hwq_id = %lu\n",
FIELD_GET(HTT_TX_HWQ_STATS_HWQ_ID,
htt_stats_buf->mac_id__hwq_id__word));
len += scnprintf(buf + len, buf_len - len, "xretry = %u\n",
htt_stats_buf->xretry);
len += scnprintf(buf + len, buf_len - len, "underrun_cnt = %u\n",
htt_stats_buf->underrun_cnt);
len += scnprintf(buf + len, buf_len - len, "flush_cnt = %u\n",
htt_stats_buf->flush_cnt);
len += scnprintf(buf + len, buf_len - len, "filt_cnt = %u\n",
htt_stats_buf->filt_cnt);
len += scnprintf(buf + len, buf_len - len, "null_mpdu_bmap = %u\n",
htt_stats_buf->null_mpdu_bmap);
len += scnprintf(buf + len, buf_len - len, "user_ack_failure = %u\n",
htt_stats_buf->user_ack_failure);
len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
htt_stats_buf->ack_tlv_proc);
len += scnprintf(buf + len, buf_len - len, "sched_id_proc = %u\n",
htt_stats_buf->sched_id_proc);
len += scnprintf(buf + len, buf_len - len, "null_mpdu_tx_count = %u\n",
htt_stats_buf->null_mpdu_tx_count);
len += scnprintf(buf + len, buf_len - len, "mpdu_bmap_not_recvd = %u\n",
htt_stats_buf->mpdu_bmap_not_recvd);
len += scnprintf(buf + len, buf_len - len, "num_bar = %u\n",
htt_stats_buf->num_bar);
len += scnprintf(buf + len, buf_len - len, "rts = %u\n",
htt_stats_buf->rts);
len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n",
htt_stats_buf->cts2self);
len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n",
htt_stats_buf->qos_null);
len += scnprintf(buf + len, buf_len - len, "mpdu_tried_cnt = %u\n",
htt_stats_buf->mpdu_tried_cnt);
len += scnprintf(buf + len, buf_len - len, "mpdu_queued_cnt = %u\n",
htt_stats_buf->mpdu_queued_cnt);
len += scnprintf(buf + len, buf_len - len, "mpdu_ack_fail_cnt = %u\n",
htt_stats_buf->mpdu_ack_fail_cnt);
len += scnprintf(buf + len, buf_len - len, "mpdu_filt_cnt = %u\n",
htt_stats_buf->mpdu_filt_cnt);
len += scnprintf(buf + len, buf_len - len, "false_mpdu_ack_count = %u\n",
htt_stats_buf->false_mpdu_ack_count);
len += scnprintf(buf + len, buf_len - len, "txq_timeout = %u\n\n",
htt_stats_buf->txq_timeout);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_hwq_difs_latency_stats_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_hwq_difs_latency_stats_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS);
len += scnprintf(buf + len, buf_len - len,
"HTT_TX_HWQ_DIFS_LATENCY_STATS_TLV_V:\n");
len += scnprintf(buf + len, buf_len - len, "hist_intvl = %u\n",
htt_stats_buf->hist_intvl);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->difs_latency_hist,
"difs_latency_hist", data_len, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_hwq_cmd_result_stats_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_hwq_cmd_result_stats_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 data_len;
data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_RESULT_STATS);
len += scnprintf(buf + len, buf_len - len,
"HTT_TX_HWQ_CMD_RESULT_STATS_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->cmd_result, "cmd_result",
data_len, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_hwq_cmd_stall_stats_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_hwq_cmd_stall_stats_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elems;
num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_STALL_STATS);
len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_CMD_STALL_STATS_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->cmd_stall_status,
"cmd_stall_status", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_hwq_fes_result_stats_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_hwq_fes_result_stats_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elems;
num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_FES_RESULT_STATS);
len += scnprintf(buf + len, buf_len - len,
"HTT_TX_HWQ_FES_RESULT_STATS_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fes_result, "fes_result",
num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u32 num_elements = ((tag_len -
sizeof(htt_stats_buf->hist_bin_size)) >> 2);
len += scnprintf(buf + len, buf_len - len,
"HTT_TX_HWQ_TRIED_MPDU_CNT_HIST_TLV_V:\n");
len += scnprintf(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u\n",
htt_stats_buf->hist_bin_size);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tried_mpdu_cnt_hist,
"tried_mpdu_cnt_hist", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_hwq_txop_used_cnt_hist_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u32 num_elements = tag_len >> 2;
len += scnprintf(buf + len, buf_len - len,
"HTT_TX_HWQ_TXOP_USED_CNT_HIST_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->txop_used_cnt_hist,
"txop_used_cnt_hist", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
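
/* Sounding stats: one TLV serves both 11ac and 11ax; dispatch on
 * tx_sounding_mode. Note that each "Sounding User" row prints
 * sounding[0..3] regardless of the user index i.
 */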
static inline void htt_print_tx_sounding_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
s32 i;
const struct htt_tx_sounding_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
const u32 *cbf_20 = htt_stats_buf->cbf_20;
const u32 *cbf_40 = htt_stats_buf->cbf_40;
const u32 *cbf_80 = htt_stats_buf->cbf_80;
const u32 *cbf_160 = htt_stats_buf->cbf_160;
if (htt_stats_buf->tx_sounding_mode == HTT_TX_AC_SOUNDING_MODE) {
len += scnprintf(buf + len, buf_len - len,
"\nHTT_TX_AC_SOUNDING_STATS_TLV:\n\n");
len += scnprintf(buf + len, buf_len - len,
"ac_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
len += scnprintf(buf + len, buf_len - len,
"ac_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
len += scnprintf(buf + len, buf_len - len,
"ac_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
len += scnprintf(buf + len, buf_len - len,
"ac_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++) {
len += scnprintf(buf + len, buf_len - len,
"Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u\n",
i,
htt_stats_buf->sounding[0],
htt_stats_buf->sounding[1],
htt_stats_buf->sounding[2],
htt_stats_buf->sounding[3]);
}
} else if (htt_stats_buf->tx_sounding_mode == HTT_TX_AX_SOUNDING_MODE) {
len += scnprintf(buf + len, buf_len - len,
"\nHTT_TX_AX_SOUNDING_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len,
"ax_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
len += scnprintf(buf + len, buf_len - len,
"ax_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
len += scnprintf(buf + len, buf_len - len,
"ax_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
len += scnprintf(buf + len, buf_len - len,
"ax_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++) {
len += scnprintf(buf + len, buf_len - len,
"Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u\n",
i,
htt_stats_buf->sounding[0],
htt_stats_buf->sounding[1],
htt_stats_buf->sounding[2],
htt_stats_buf->sounding[3]);
}
}
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_selfgen_cmn_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_selfgen_cmn_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_CMN_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
len += scnprintf(buf + len, buf_len - len, "su_bar = %u\n",
htt_stats_buf->su_bar);
len += scnprintf(buf + len, buf_len - len, "rts = %u\n",
htt_stats_buf->rts);
len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n",
htt_stats_buf->cts2self);
len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n",
htt_stats_buf->qos_null);
len += scnprintf(buf + len, buf_len - len, "delayed_bar_1 = %u\n",
htt_stats_buf->delayed_bar_1);
len += scnprintf(buf + len, buf_len - len, "delayed_bar_2 = %u\n",
htt_stats_buf->delayed_bar_2);
len += scnprintf(buf + len, buf_len - len, "delayed_bar_3 = %u\n",
htt_stats_buf->delayed_bar_3);
len += scnprintf(buf + len, buf_len - len, "delayed_bar_4 = %u\n",
htt_stats_buf->delayed_bar_4);
len += scnprintf(buf + len, buf_len - len, "delayed_bar_5 = %u\n",
htt_stats_buf->delayed_bar_5);
len += scnprintf(buf + len, buf_len - len, "delayed_bar_6 = %u\n",
htt_stats_buf->delayed_bar_6);
len += scnprintf(buf + len, buf_len - len, "delayed_bar_7 = %u\n\n",
htt_stats_buf->delayed_bar_7);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_selfgen_ac_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa = %u\n",
htt_stats_buf->ac_su_ndpa);
len += scnprintf(buf + len, buf_len - len, "ac_su_ndp = %u\n",
htt_stats_buf->ac_su_ndp);
len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa = %u\n",
htt_stats_buf->ac_mu_mimo_ndpa);
len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp = %u\n",
htt_stats_buf->ac_mu_mimo_ndp);
len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_1 = %u\n",
htt_stats_buf->ac_mu_mimo_brpoll_1);
len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_2 = %u\n",
htt_stats_buf->ac_mu_mimo_brpoll_2);
len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_3 = %u\n\n",
htt_stats_buf->ac_mu_mimo_brpoll_3);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_selfgen_ax_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa = %u\n",
htt_stats_buf->ax_su_ndpa);
len += scnprintf(buf + len, buf_len - len, "ax_su_ndp = %u\n",
htt_stats_buf->ax_su_ndp);
len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa = %u\n",
htt_stats_buf->ax_mu_mimo_ndpa);
len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp = %u\n",
htt_stats_buf->ax_mu_mimo_ndp);
len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_1 = %u\n",
htt_stats_buf->ax_mu_mimo_brpoll_1);
len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_2 = %u\n",
htt_stats_buf->ax_mu_mimo_brpoll_2);
len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_3 = %u\n",
htt_stats_buf->ax_mu_mimo_brpoll_3);
len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_4 = %u\n",
htt_stats_buf->ax_mu_mimo_brpoll_4);
len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_5 = %u\n",
htt_stats_buf->ax_mu_mimo_brpoll_5);
len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_6 = %u\n",
htt_stats_buf->ax_mu_mimo_brpoll_6);
len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_7 = %u\n",
htt_stats_buf->ax_mu_mimo_brpoll_7);
len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger = %u\n",
htt_stats_buf->ax_basic_trigger);
len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_trigger = %u\n",
htt_stats_buf->ax_ulmumimo_trigger);
len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger = %u\n",
htt_stats_buf->ax_bsr_trigger);
len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger = %u\n",
htt_stats_buf->ax_mu_bar_trigger);
len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger = %u\n\n",
htt_stats_buf->ax_mu_rts_trigger);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_selfgen_ac_err_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_selfgen_ac_err_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_ERR_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "ac_su_ndp_err = %u\n",
htt_stats_buf->ac_su_ndp_err);
len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa_err = %u\n",
htt_stats_buf->ac_su_ndpa_err);
len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa_err = %u\n",
htt_stats_buf->ac_mu_mimo_ndpa_err);
len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp_err = %u\n",
htt_stats_buf->ac_mu_mimo_ndp_err);
len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp1_err = %u\n",
htt_stats_buf->ac_mu_mimo_brp1_err);
len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp2_err = %u\n",
htt_stats_buf->ac_mu_mimo_brp2_err);
len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp3_err = %u\n\n",
htt_stats_buf->ac_mu_mimo_brp3_err);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_selfgen_ax_err_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_selfgen_ax_err_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_ERR_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "ax_su_ndp_err = %u\n",
htt_stats_buf->ax_su_ndp_err);
len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa_err = %u\n",
htt_stats_buf->ax_su_ndpa_err);
len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa_err = %u\n",
htt_stats_buf->ax_mu_mimo_ndpa_err);
len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp_err = %u\n",
htt_stats_buf->ax_mu_mimo_ndp_err);
len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp1_err = %u\n",
htt_stats_buf->ax_mu_mimo_brp1_err);
len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp2_err = %u\n",
htt_stats_buf->ax_mu_mimo_brp2_err);
len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp3_err = %u\n",
htt_stats_buf->ax_mu_mimo_brp3_err);
len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp4_err = %u\n",
htt_stats_buf->ax_mu_mimo_brp4_err);
len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp5_err = %u\n",
htt_stats_buf->ax_mu_mimo_brp5_err);
len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp6_err = %u\n",
htt_stats_buf->ax_mu_mimo_brp6_err);
len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp7_err = %u\n",
htt_stats_buf->ax_mu_mimo_brp7_err);
len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger_err = %u\n",
htt_stats_buf->ax_basic_trigger_err);
len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_trigger_err = %u\n",
htt_stats_buf->ax_ulmumimo_trigger_err);
len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger_err = %u\n",
htt_stats_buf->ax_bsr_trigger_err);
len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u\n",
htt_stats_buf->ax_mu_bar_trigger_err);
len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger_err = %u\n\n",
htt_stats_buf->ax_mu_rts_trigger_err);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
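
/* MU-MIMO scheduler stats: fixed counters first, then per-group-size
 * and per-user-count breakdowns for 11ac, 11ax MU-MIMO, OFDMA and
 * UL MU-MIMO.
 */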
static inline void
htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_pdev_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u8 i;
len += scnprintf(buf + len, buf_len - len,
"HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_posted = %u\n",
htt_stats_buf->mu_mimo_sch_posted);
len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n",
htt_stats_buf->mu_mimo_sch_failed);
len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n\n",
htt_stats_buf->mu_mimo_ppdu_posted);
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++)
len += scnprintf(buf + len, buf_len - len,
"ac_mu_mimo_sch_posted_per_group_index %u = %u\n",
i, htt_stats_buf->ac_mu_mimo_sch_posted_per_grp_sz[i]);
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++)
len += scnprintf(buf + len, buf_len - len,
"ax_mu_mimo_sch_posted_per_group_index %u = %u\n",
i, htt_stats_buf->ax_mu_mimo_sch_posted_per_grp_sz[i]);
len += scnprintf(buf + len, buf_len - len, "11ac MU_MIMO SCH STATS:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++)
len += scnprintf(buf + len, buf_len - len,
"ac_mu_mimo_sch_nusers_%u = %u\n",
i, htt_stats_buf->ac_mu_mimo_sch_nusers[i]);
len += scnprintf(buf + len, buf_len - len, "\n11ax MU_MIMO SCH STATS:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++)
len += scnprintf(buf + len, buf_len - len,
"ax_mu_mimo_sch_nusers_%u = %u\n",
i, htt_stats_buf->ax_mu_mimo_sch_nusers[i]);
len += scnprintf(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_sch_nusers_%u = %u\n",
i, htt_stats_buf->ax_ofdma_sch_nusers[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ul_ofdma_basic_sch_nusers_%u = %u\n",
i, htt_stats_buf->ax_ul_ofdma_basic_sch_nusers[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ul_ofdma_bsr_sch_nusers_%u = %u\n",
i, htt_stats_buf->ax_ul_ofdma_bsr_sch_nusers[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ul_ofdma_sch_bar_nusers_%u = %u\n",
i, htt_stats_buf->ax_ul_ofdma_bar_sch_nusers[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ul_ofdma_brp_sch_nusers_%u = %u\n",
i, htt_stats_buf->ax_ul_ofdma_brp_sch_nusers[i]);
}
len += scnprintf(buf + len, buf_len - len, "\n11ax UL MUMIO SCH STATS:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS; i++) {
len += scnprintf(buf + len, buf_len - len,
"ax_ul_mumimo_basic_sch_nusers_%u = %u\n",
i, htt_stats_buf->ax_ul_mumimo_basic_sch_nusers[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ul_mumimo_brp_sch_nusers_%u = %u\n",
i, htt_stats_buf->ax_ul_mumimo_brp_sch_nusers[i]);
}
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
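
/* One TLV is delivered per user index; the section header is printed
 * only for user 0 and out-of-range indices are silently skipped. The
 * same struct is reused for AC/AX MU-MIMO and AX MU-OFDMA, selected by
 * tx_sched_mode.
 */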
static inline void
htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_pdev_mpdu_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC) {
if (!htt_stats_buf->user_index)
len += scnprintf(buf + len, buf_len - len,
"HTT_TX_PDEV_MU_MIMO_AC_MPDU_STATS:\n");
if (htt_stats_buf->user_index <
HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS) {
len += scnprintf(buf + len, buf_len - len,
"ac_mu_mimo_mpdus_queued_usr_%u = %u\n",
htt_stats_buf->user_index,
htt_stats_buf->mpdus_queued_usr);
len += scnprintf(buf + len, buf_len - len,
"ac_mu_mimo_mpdus_tried_usr_%u = %u\n",
htt_stats_buf->user_index,
htt_stats_buf->mpdus_tried_usr);
len += scnprintf(buf + len, buf_len - len,
"ac_mu_mimo_mpdus_failed_usr_%u = %u\n",
htt_stats_buf->user_index,
htt_stats_buf->mpdus_failed_usr);
len += scnprintf(buf + len, buf_len - len,
"ac_mu_mimo_mpdus_requeued_usr_%u = %u\n",
htt_stats_buf->user_index,
htt_stats_buf->mpdus_requeued_usr);
len += scnprintf(buf + len, buf_len - len,
"ac_mu_mimo_err_no_ba_usr_%u = %u\n",
htt_stats_buf->user_index,
htt_stats_buf->err_no_ba_usr);
len += scnprintf(buf + len, buf_len - len,
"ac_mu_mimo_mpdu_underrun_usr_%u = %u\n",
htt_stats_buf->user_index,
htt_stats_buf->mpdu_underrun_usr);
len += scnprintf(buf + len, buf_len - len,
"ac_mu_mimo_ampdu_underrun_usr_%u = %u\n\n",
htt_stats_buf->user_index,
htt_stats_buf->ampdu_underrun_usr);
}
}
if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX) {
if (!htt_stats_buf->user_index)
len += scnprintf(buf + len, buf_len - len,
"HTT_TX_PDEV_MU_MIMO_AX_MPDU_STATS:\n");
if (htt_stats_buf->user_index <
HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS) {
len += scnprintf(buf + len, buf_len - len,
"ax_mu_mimo_mpdus_queued_usr_%u = %u\n",
htt_stats_buf->user_index,
htt_stats_buf->mpdus_queued_usr);
len += scnprintf(buf + len, buf_len - len,
"ax_mu_mimo_mpdus_tried_usr_%u = %u\n",
htt_stats_buf->user_index,
htt_stats_buf->mpdus_tried_usr);
len += scnprintf(buf + len, buf_len - len,
"ax_mu_mimo_mpdus_failed_usr_%u = %u\n",
htt_stats_buf->user_index,
htt_stats_buf->mpdus_failed_usr);
len += scnprintf(buf + len, buf_len - len,
"ax_mu_mimo_mpdus_requeued_usr_%u = %u\n",
htt_stats_buf->user_index,
htt_stats_buf->mpdus_requeued_usr);
len += scnprintf(buf + len, buf_len - len,
"ax_mu_mimo_err_no_ba_usr_%u = %u\n",
htt_stats_buf->user_index,
htt_stats_buf->err_no_ba_usr);
len += scnprintf(buf + len, buf_len - len,
"ax_mu_mimo_mpdu_underrun_usr_%u = %u\n",
htt_stats_buf->user_index,
htt_stats_buf->mpdu_underrun_usr);
len += scnprintf(buf + len, buf_len - len,
"ax_mu_mimo_ampdu_underrun_usr_%u = %u\n\n",
htt_stats_buf->user_index,
htt_stats_buf->ampdu_underrun_usr);
}
}
if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX) {
if (!htt_stats_buf->user_index)
len += scnprintf(buf + len, buf_len - len,
"HTT_TX_PDEV_AX_MU_OFDMA_MPDU_STATS:\n");
if (htt_stats_buf->user_index < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS) {
len += scnprintf(buf + len, buf_len - len,
"ax_mu_ofdma_mpdus_queued_usr_%u = %u\n",
htt_stats_buf->user_index,
htt_stats_buf->mpdus_queued_usr);
len += scnprintf(buf + len, buf_len - len,
"ax_mu_ofdma_mpdus_tried_usr_%u = %u\n",
htt_stats_buf->user_index,
htt_stats_buf->mpdus_tried_usr);
len += scnprintf(buf + len, buf_len - len,
"ax_mu_ofdma_mpdus_failed_usr_%u = %u\n",
htt_stats_buf->user_index,
htt_stats_buf->mpdus_failed_usr);
len += scnprintf(buf + len, buf_len - len,
"ax_mu_ofdma_mpdus_requeued_usr_%u = %u\n",
htt_stats_buf->user_index,
htt_stats_buf->mpdus_requeued_usr);
len += scnprintf(buf + len, buf_len - len,
"ax_mu_ofdma_err_no_ba_usr_%u = %u\n",
htt_stats_buf->user_index,
htt_stats_buf->err_no_ba_usr);
len += scnprintf(buf + len, buf_len - len,
"ax_mu_ofdma_mpdu_underrun_usr_%u = %u\n",
htt_stats_buf->user_index,
htt_stats_buf->mpdu_underrun_usr);
len += scnprintf(buf + len, buf_len - len,
"ax_mu_ofdma_ampdu_underrun_usr_%u = %u\n\n",
htt_stats_buf->user_index,
htt_stats_buf->ampdu_underrun_usr);
}
}
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_sched_txq_cmd_posted_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_sched_txq_cmd_posted_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX);
len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_cmd_posted,
"sched_cmd_posted", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_sched_txq_cmd_reaped_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_sched_txq_cmd_reaped_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX);
len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_cmd_reaped,
"sched_cmd_reaped", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_sched_txq_sched_order_su_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_sched_txq_sched_order_su_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
/* each entry is u32, i.e. 4 bytes */
u32 sched_order_su_num_entries =
min_t(u32, (tag_len >> 2), HTT_TX_PDEV_NUM_SCHED_ORDER_LOG);
len += scnprintf(buf + len, buf_len - len,
"HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_order_su, "sched_order_su",
sched_order_su_num_entries, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_sched_txq_sched_ineligibility_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_sched_txq_sched_ineligibility_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
/* each entry is u32, i.e. 4 bytes */
u32 sched_ineligibility_num_entries = tag_len >> 2;
len += scnprintf(buf + len, buf_len - len,
"HTT_SCHED_TXQ_SCHED_INELIGIBILITY_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_ineligibility,
"sched_ineligibility", sched_ineligibility_num_entries,
"\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
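
/* Per-TXQ scheduler state: mac_id and txq_id share one word, followed
 * by a flat list of scheduler counters and timestamps.
 */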
static inline void
htt_print_tx_pdev_stats_sched_per_txq_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_pdev_stats_sched_per_txq_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len,
"HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
FIELD_GET(HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID,
htt_stats_buf->mac_id__txq_id__word));
len += scnprintf(buf + len, buf_len - len, "txq_id = %lu\n",
FIELD_GET(HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID,
htt_stats_buf->mac_id__txq_id__word));
len += scnprintf(buf + len, buf_len - len, "sched_policy = %u\n",
htt_stats_buf->sched_policy);
len += scnprintf(buf + len, buf_len - len,
"last_sched_cmd_posted_timestamp = %u\n",
htt_stats_buf->last_sched_cmd_posted_timestamp);
len += scnprintf(buf + len, buf_len - len,
"last_sched_cmd_compl_timestamp = %u\n",
htt_stats_buf->last_sched_cmd_compl_timestamp);
len += scnprintf(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u\n",
htt_stats_buf->sched_2_tac_lwm_count);
len += scnprintf(buf + len, buf_len - len, "sched_2_tac_ring_full = %u\n",
htt_stats_buf->sched_2_tac_ring_full);
len += scnprintf(buf + len, buf_len - len, "sched_cmd_post_failure = %u\n",
htt_stats_buf->sched_cmd_post_failure);
len += scnprintf(buf + len, buf_len - len, "num_active_tids = %u\n",
htt_stats_buf->num_active_tids);
len += scnprintf(buf + len, buf_len - len, "num_ps_schedules = %u\n",
htt_stats_buf->num_ps_schedules);
len += scnprintf(buf + len, buf_len - len, "sched_cmds_pending = %u\n",
htt_stats_buf->sched_cmds_pending);
len += scnprintf(buf + len, buf_len - len, "num_tid_register = %u\n",
htt_stats_buf->num_tid_register);
len += scnprintf(buf + len, buf_len - len, "num_tid_unregister = %u\n",
htt_stats_buf->num_tid_unregister);
len += scnprintf(buf + len, buf_len - len, "num_qstats_queried = %u\n",
htt_stats_buf->num_qstats_queried);
len += scnprintf(buf + len, buf_len - len, "qstats_update_pending = %u\n",
htt_stats_buf->qstats_update_pending);
len += scnprintf(buf + len, buf_len - len, "last_qstats_query_timestamp = %u\n",
htt_stats_buf->last_qstats_query_timestamp);
len += scnprintf(buf + len, buf_len - len, "num_tqm_cmdq_full = %u\n",
htt_stats_buf->num_tqm_cmdq_full);
len += scnprintf(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u\n",
htt_stats_buf->num_de_sched_algo_trigger);
len += scnprintf(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u\n",
htt_stats_buf->num_rt_sched_algo_trigger);
len += scnprintf(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u\n",
htt_stats_buf->num_tqm_sched_algo_trigger);
len += scnprintf(buf + len, buf_len - len, "notify_sched = %u\n\n",
htt_stats_buf->notify_sched);
len += scnprintf(buf + len, buf_len - len, "dur_based_sendn_term = %u\n\n",
htt_stats_buf->dur_based_sendn_term);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_stats_tx_sched_cmn_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_stats_tx_sched_cmn_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
len += scnprintf(buf + len, buf_len - len, "current_timestamp = %u\n\n",
htt_stats_buf->current_timestamp);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_tqm_gen_mpdu_stats_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_tqm_gen_mpdu_stats_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elements = min_t(u16, (tag_len >> 2),
HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->gen_mpdu_end_reason,
"gen_mpdu_end_reason", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_tqm_list_mpdu_stats_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_tqm_list_mpdu_stats_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
len += scnprintf(buf + len, buf_len - len,
"HTT_TX_TQM_LIST_MPDU_STATS_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->list_mpdu_end_reason,
"list_mpdu_end_reason", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_tqm_list_mpdu_cnt_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_tqm_list_mpdu_cnt_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elems = min_t(u16, (tag_len >> 2),
HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS);
len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->list_mpdu_cnt_hist,
"list_mpdu_cnt_hist", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_tqm_pdev_stats_tlv_v(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_tqm_pdev_stats_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:\n");
len += scnprintf(buf + len, buf_len - len, "msdu_count = %u\n",
htt_stats_buf->msdu_count);
len += scnprintf(buf + len, buf_len - len, "mpdu_count = %u\n",
htt_stats_buf->mpdu_count);
len += scnprintf(buf + len, buf_len - len, "remove_msdu = %u\n",
htt_stats_buf->remove_msdu);
len += scnprintf(buf + len, buf_len - len, "remove_mpdu = %u\n",
htt_stats_buf->remove_mpdu);
len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl = %u\n",
htt_stats_buf->remove_msdu_ttl);
len += scnprintf(buf + len, buf_len - len, "send_bar = %u\n",
htt_stats_buf->send_bar);
len += scnprintf(buf + len, buf_len - len, "bar_sync = %u\n",
htt_stats_buf->bar_sync);
len += scnprintf(buf + len, buf_len - len, "notify_mpdu = %u\n",
htt_stats_buf->notify_mpdu);
len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n",
htt_stats_buf->sync_cmd);
len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n",
htt_stats_buf->write_cmd);
len += scnprintf(buf + len, buf_len - len, "hwsch_trigger = %u\n",
htt_stats_buf->hwsch_trigger);
len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
htt_stats_buf->ack_tlv_proc);
len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n",
htt_stats_buf->gen_mpdu_cmd);
len += scnprintf(buf + len, buf_len - len, "gen_list_cmd = %u\n",
htt_stats_buf->gen_list_cmd);
len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n",
htt_stats_buf->remove_mpdu_cmd);
len += scnprintf(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u\n",
htt_stats_buf->remove_mpdu_tried_cmd);
len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n",
htt_stats_buf->mpdu_queue_stats_cmd);
len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n",
htt_stats_buf->mpdu_head_info_cmd);
len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n",
htt_stats_buf->msdu_flow_stats_cmd);
len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n",
htt_stats_buf->remove_msdu_cmd);
len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u\n",
htt_stats_buf->remove_msdu_ttl_cmd);
len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n",
htt_stats_buf->flush_cache_cmd);
len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n",
htt_stats_buf->update_mpduq_cmd);
len += scnprintf(buf + len, buf_len - len, "enqueue = %u\n",
htt_stats_buf->enqueue);
len += scnprintf(buf + len, buf_len - len, "enqueue_notify = %u\n",
htt_stats_buf->enqueue_notify);
len += scnprintf(buf + len, buf_len - len, "notify_mpdu_at_head = %u\n",
htt_stats_buf->notify_mpdu_at_head);
len += scnprintf(buf + len, buf_len - len, "notify_mpdu_state_valid = %u\n",
htt_stats_buf->notify_mpdu_state_valid);
len += scnprintf(buf + len, buf_len - len, "sched_udp_notify1 = %u\n",
htt_stats_buf->sched_udp_notify1);
len += scnprintf(buf + len, buf_len - len, "sched_udp_notify2 = %u\n",
htt_stats_buf->sched_udp_notify2);
len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify1 = %u\n",
htt_stats_buf->sched_nonudp_notify1);
len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n\n",
htt_stats_buf->sched_nonudp_notify2);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_tx_tqm_cmn_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_tqm_cmn_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
len += scnprintf(buf + len, buf_len - len, "max_cmdq_id = %u\n",
htt_stats_buf->max_cmdq_id);
len += scnprintf(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u\n",
htt_stats_buf->list_mpdu_cnt_hist_intvl);
len += scnprintf(buf + len, buf_len - len, "add_msdu = %u\n",
htt_stats_buf->add_msdu);
len += scnprintf(buf + len, buf_len - len, "q_empty = %u\n",
htt_stats_buf->q_empty);
len += scnprintf(buf + len, buf_len - len, "q_not_empty = %u\n",
htt_stats_buf->q_not_empty);
len += scnprintf(buf + len, buf_len - len, "drop_notification = %u\n",
htt_stats_buf->drop_notification);
len += scnprintf(buf + len, buf_len - len, "desc_threshold = %u\n\n",
htt_stats_buf->desc_threshold);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_tx_tqm_error_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_tqm_error_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "q_empty_failure = %u\n",
htt_stats_buf->q_empty_failure);
len += scnprintf(buf + len, buf_len - len, "q_not_empty_failure = %u\n",
htt_stats_buf->q_not_empty_failure);
len += scnprintf(buf + len, buf_len - len, "add_msdu_failure = %u\n\n",
htt_stats_buf->add_msdu_failure);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_tx_tqm_cmdq_status_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_tqm_cmdq_status_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMDQ_STATUS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
FIELD_GET(HTT_TX_TQM_CMDQ_STATUS_MAC_ID,
htt_stats_buf->mac_id__cmdq_id__word));
len += scnprintf(buf + len, buf_len - len, "cmdq_id = %lu\n\n",
FIELD_GET(HTT_TX_TQM_CMDQ_STATUS_CMDQ_ID,
htt_stats_buf->mac_id__cmdq_id__word));
len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n",
htt_stats_buf->sync_cmd);
len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n",
htt_stats_buf->write_cmd);
len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n",
htt_stats_buf->gen_mpdu_cmd);
len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n",
htt_stats_buf->mpdu_queue_stats_cmd);
len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n",
htt_stats_buf->mpdu_head_info_cmd);
len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n",
htt_stats_buf->msdu_flow_stats_cmd);
len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n",
htt_stats_buf->remove_mpdu_cmd);
len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n",
htt_stats_buf->remove_msdu_cmd);
len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n",
htt_stats_buf->flush_cache_cmd);
len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n",
htt_stats_buf->update_mpduq_cmd);
len += scnprintf(buf + len, buf_len - len, "update_msduq_cmd = %u\n\n",
htt_stats_buf->update_msduq_cmd);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_de_eapol_packets_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_de_eapol_packets_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len,
"HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "m1_packets = %u\n",
htt_stats_buf->m1_packets);
len += scnprintf(buf + len, buf_len - len, "m2_packets = %u\n",
htt_stats_buf->m2_packets);
len += scnprintf(buf + len, buf_len - len, "m3_packets = %u\n",
htt_stats_buf->m3_packets);
len += scnprintf(buf + len, buf_len - len, "m4_packets = %u\n",
htt_stats_buf->m4_packets);
len += scnprintf(buf + len, buf_len - len, "g1_packets = %u\n",
htt_stats_buf->g1_packets);
len += scnprintf(buf + len, buf_len - len, "g2_packets = %u\n\n",
htt_stats_buf->g2_packets);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_de_classify_failed_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_de_classify_failed_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len,
"HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "ap_bss_peer_not_found = %u\n",
htt_stats_buf->ap_bss_peer_not_found);
len += scnprintf(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u\n",
htt_stats_buf->ap_bcast_mcast_no_peer);
len += scnprintf(buf + len, buf_len - len, "sta_delete_in_progress = %u\n",
htt_stats_buf->sta_delete_in_progress);
len += scnprintf(buf + len, buf_len - len, "ibss_no_bss_peer = %u\n",
htt_stats_buf->ibss_no_bss_peer);
len += scnprintf(buf + len, buf_len - len, "invalid_vdev_type = %u\n",
htt_stats_buf->invalid_vdev_type);
len += scnprintf(buf + len, buf_len - len, "invalid_ast_peer_entry = %u\n",
htt_stats_buf->invalid_ast_peer_entry);
len += scnprintf(buf + len, buf_len - len, "peer_entry_invalid = %u\n",
htt_stats_buf->peer_entry_invalid);
len += scnprintf(buf + len, buf_len - len, "ethertype_not_ip = %u\n",
htt_stats_buf->ethertype_not_ip);
len += scnprintf(buf + len, buf_len - len, "eapol_lookup_failed = %u\n",
htt_stats_buf->eapol_lookup_failed);
len += scnprintf(buf + len, buf_len - len, "qpeer_not_allow_data = %u\n",
htt_stats_buf->qpeer_not_allow_data);
len += scnprintf(buf + len, buf_len - len, "fse_tid_override = %u\n",
htt_stats_buf->fse_tid_override);
len += scnprintf(buf + len, buf_len - len, "ipv6_jumbogram_zero_length = %u\n",
htt_stats_buf->ipv6_jumbogram_zero_length);
len += scnprintf(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u\n\n",
htt_stats_buf->qos_to_non_qos_in_prog);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_de_classify_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_de_classify_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "arp_packets = %u\n",
htt_stats_buf->arp_packets);
len += scnprintf(buf + len, buf_len - len, "igmp_packets = %u\n",
htt_stats_buf->igmp_packets);
len += scnprintf(buf + len, buf_len - len, "dhcp_packets = %u\n",
htt_stats_buf->dhcp_packets);
len += scnprintf(buf + len, buf_len - len, "host_inspected = %u\n",
htt_stats_buf->host_inspected);
len += scnprintf(buf + len, buf_len - len, "htt_included = %u\n",
htt_stats_buf->htt_included);
len += scnprintf(buf + len, buf_len - len, "htt_valid_mcs = %u\n",
htt_stats_buf->htt_valid_mcs);
len += scnprintf(buf + len, buf_len - len, "htt_valid_nss = %u\n",
htt_stats_buf->htt_valid_nss);
len += scnprintf(buf + len, buf_len - len, "htt_valid_preamble_type = %u\n",
htt_stats_buf->htt_valid_preamble_type);
len += scnprintf(buf + len, buf_len - len, "htt_valid_chainmask = %u\n",
htt_stats_buf->htt_valid_chainmask);
len += scnprintf(buf + len, buf_len - len, "htt_valid_guard_interval = %u\n",
htt_stats_buf->htt_valid_guard_interval);
len += scnprintf(buf + len, buf_len - len, "htt_valid_retries = %u\n",
htt_stats_buf->htt_valid_retries);
len += scnprintf(buf + len, buf_len - len, "htt_valid_bw_info = %u\n",
htt_stats_buf->htt_valid_bw_info);
len += scnprintf(buf + len, buf_len - len, "htt_valid_power = %u\n",
htt_stats_buf->htt_valid_power);
len += scnprintf(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x\n",
htt_stats_buf->htt_valid_key_flags);
len += scnprintf(buf + len, buf_len - len, "htt_valid_no_encryption = %u\n",
htt_stats_buf->htt_valid_no_encryption);
len += scnprintf(buf + len, buf_len - len, "fse_entry_count = %u\n",
htt_stats_buf->fse_entry_count);
len += scnprintf(buf + len, buf_len - len, "fse_priority_be = %u\n",
htt_stats_buf->fse_priority_be);
len += scnprintf(buf + len, buf_len - len, "fse_priority_high = %u\n",
htt_stats_buf->fse_priority_high);
len += scnprintf(buf + len, buf_len - len, "fse_priority_low = %u\n",
htt_stats_buf->fse_priority_low);
len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u\n",
htt_stats_buf->fse_traffic_ptrn_be);
len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_over_sub = %u\n",
htt_stats_buf->fse_traffic_ptrn_over_sub);
len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u\n",
htt_stats_buf->fse_traffic_ptrn_bursty);
len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u\n",
htt_stats_buf->fse_traffic_ptrn_interactive);
len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u\n",
htt_stats_buf->fse_traffic_ptrn_periodic);
len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_alloc = %u\n",
htt_stats_buf->fse_hwqueue_alloc);
len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_created = %u\n",
htt_stats_buf->fse_hwqueue_created);
len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_send_to_host = %u\n",
htt_stats_buf->fse_hwqueue_send_to_host);
len += scnprintf(buf + len, buf_len - len, "mcast_entry = %u\n",
htt_stats_buf->mcast_entry);
len += scnprintf(buf + len, buf_len - len, "bcast_entry = %u\n",
htt_stats_buf->bcast_entry);
len += scnprintf(buf + len, buf_len - len, "htt_update_peer_cache = %u\n",
htt_stats_buf->htt_update_peer_cache);
len += scnprintf(buf + len, buf_len - len, "htt_learning_frame = %u\n",
htt_stats_buf->htt_learning_frame);
len += scnprintf(buf + len, buf_len - len, "fse_invalid_peer = %u\n",
htt_stats_buf->fse_invalid_peer);
len += scnprintf(buf + len, buf_len - len, "mec_notify = %u\n\n",
htt_stats_buf->mec_notify);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_de_classify_status_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_de_classify_status_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len,
"HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "eok = %u\n",
htt_stats_buf->eok);
len += scnprintf(buf + len, buf_len - len, "classify_done = %u\n",
htt_stats_buf->classify_done);
len += scnprintf(buf + len, buf_len - len, "lookup_failed = %u\n",
htt_stats_buf->lookup_failed);
len += scnprintf(buf + len, buf_len - len, "send_host_dhcp = %u\n",
htt_stats_buf->send_host_dhcp);
len += scnprintf(buf + len, buf_len - len, "send_host_mcast = %u\n",
htt_stats_buf->send_host_mcast);
len += scnprintf(buf + len, buf_len - len, "send_host_unknown_dest = %u\n",
htt_stats_buf->send_host_unknown_dest);
len += scnprintf(buf + len, buf_len - len, "send_host = %u\n",
htt_stats_buf->send_host);
len += scnprintf(buf + len, buf_len - len, "status_invalid = %u\n\n",
htt_stats_buf->status_invalid);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_de_enqueue_packets_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_de_enqueue_packets_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len,
"HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "enqueued_pkts = %u\n",
htt_stats_buf->enqueued_pkts);
len += scnprintf(buf + len, buf_len - len, "to_tqm = %u\n",
htt_stats_buf->to_tqm);
len += scnprintf(buf + len, buf_len - len, "to_tqm_bypass = %u\n\n",
htt_stats_buf->to_tqm_bypass);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_de_enqueue_discard_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_de_enqueue_discard_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len,
"HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "discarded_pkts = %u\n",
htt_stats_buf->discarded_pkts);
len += scnprintf(buf + len, buf_len - len, "local_frames = %u\n",
htt_stats_buf->local_frames);
len += scnprintf(buf + len, buf_len - len, "is_ext_msdu = %u\n\n",
htt_stats_buf->is_ext_msdu);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_tx_de_compl_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_de_compl_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_COMPL_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "tcl_dummy_frame = %u\n",
htt_stats_buf->tcl_dummy_frame);
len += scnprintf(buf + len, buf_len - len, "tqm_dummy_frame = %u\n",
htt_stats_buf->tqm_dummy_frame);
len += scnprintf(buf + len, buf_len - len, "tqm_notify_frame = %u\n",
htt_stats_buf->tqm_notify_frame);
len += scnprintf(buf + len, buf_len - len, "fw2wbm_enq = %u\n",
htt_stats_buf->fw2wbm_enq);
len += scnprintf(buf + len, buf_len - len, "tqm_bypass_frame = %u\n\n",
htt_stats_buf->tqm_bypass_frame);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_de_fw2wbm_ring_full_hist_tlv(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_de_fw2wbm_ring_full_hist_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elements = tag_len >> 2;
len += scnprintf(buf + len, buf_len - len,
"HTT_TX_DE_FW2WBM_RING_FULL_HIST_TLV");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw2wbm_ring_full_hist,
"fw2wbm_ring_full_hist", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_tx_de_cmn_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_de_cmn_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CMN_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
len += scnprintf(buf + len, buf_len - len, "tcl2fw_entry_count = %u\n",
htt_stats_buf->tcl2fw_entry_count);
len += scnprintf(buf + len, buf_len - len, "not_to_fw = %u\n",
htt_stats_buf->not_to_fw);
len += scnprintf(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u\n",
htt_stats_buf->invalid_pdev_vdev_peer);
len += scnprintf(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u\n",
htt_stats_buf->tcl_res_invalid_addrx);
len += scnprintf(buf + len, buf_len - len, "wbm2fw_entry_count = %u\n",
htt_stats_buf->wbm2fw_entry_count);
len += scnprintf(buf + len, buf_len - len, "invalid_pdev = %u\n\n",
htt_stats_buf->invalid_pdev);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_ring_if_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_ring_if_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_RING_IF_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "base_addr = %u\n",
htt_stats_buf->base_addr);
len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n",
htt_stats_buf->elem_size);
len += scnprintf(buf + len, buf_len - len, "num_elems = %lu\n",
FIELD_GET(HTT_RING_IF_STATS_NUM_ELEMS,
htt_stats_buf->num_elems__prefetch_tail_idx));
len += scnprintf(buf + len, buf_len - len, "prefetch_tail_idx = %lu\n",
FIELD_GET(HTT_RING_IF_STATS_PREFETCH_TAIL_INDEX,
htt_stats_buf->num_elems__prefetch_tail_idx));
len += scnprintf(buf + len, buf_len - len, "head_idx = %lu\n",
FIELD_GET(HTT_RING_IF_STATS_HEAD_IDX,
htt_stats_buf->head_idx__tail_idx));
len += scnprintf(buf + len, buf_len - len, "tail_idx = %lu\n",
FIELD_GET(HTT_RING_IF_STATS_TAIL_IDX,
htt_stats_buf->head_idx__tail_idx));
len += scnprintf(buf + len, buf_len - len, "shadow_head_idx = %lu\n",
FIELD_GET(HTT_RING_IF_STATS_SHADOW_HEAD_IDX,
htt_stats_buf->shadow_head_idx__shadow_tail_idx));
len += scnprintf(buf + len, buf_len - len, "shadow_tail_idx = %lu\n",
FIELD_GET(HTT_RING_IF_STATS_SHADOW_TAIL_IDX,
htt_stats_buf->shadow_head_idx__shadow_tail_idx));
len += scnprintf(buf + len, buf_len - len, "num_tail_incr = %u\n",
htt_stats_buf->num_tail_incr);
len += scnprintf(buf + len, buf_len - len, "lwm_thresh = %lu\n",
FIELD_GET(HTT_RING_IF_STATS_LWM_THRESH,
htt_stats_buf->lwm_thresh__hwm_thresh));
len += scnprintf(buf + len, buf_len - len, "hwm_thresh = %lu\n",
FIELD_GET(HTT_RING_IF_STATS_HWM_THRESH,
htt_stats_buf->lwm_thresh__hwm_thresh));
len += scnprintf(buf + len, buf_len - len, "overrun_hit_count = %u\n",
htt_stats_buf->overrun_hit_count);
len += scnprintf(buf + len, buf_len - len, "underrun_hit_count = %u\n",
htt_stats_buf->underrun_hit_count);
len += scnprintf(buf + len, buf_len - len, "prod_blockwait_count = %u\n",
htt_stats_buf->prod_blockwait_count);
len += scnprintf(buf + len, buf_len - len, "cons_blockwait_count = %u\n",
htt_stats_buf->cons_blockwait_count);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->low_wm_hit_count,
"low_wm_hit_count", HTT_STATS_LOW_WM_BINS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->high_wm_hit_count,
"high_wm_hit_count", HTT_STATS_HIGH_WM_BINS, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_ring_if_cmn_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_ring_if_cmn_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_RING_IF_CMN_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n",
htt_stats_buf->num_records);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_sfm_client_user_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_sfm_client_user_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elems = tag_len >> 2;
len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_USER_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->dwords_used_by_user_n,
"dwords_used_by_user_n", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_sfm_client_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_sfm_client_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "client_id = %u\n",
htt_stats_buf->client_id);
len += scnprintf(buf + len, buf_len - len, "buf_min = %u\n",
htt_stats_buf->buf_min);
len += scnprintf(buf + len, buf_len - len, "buf_max = %u\n",
htt_stats_buf->buf_max);
len += scnprintf(buf + len, buf_len - len, "buf_busy = %u\n",
htt_stats_buf->buf_busy);
len += scnprintf(buf + len, buf_len - len, "buf_alloc = %u\n",
htt_stats_buf->buf_alloc);
len += scnprintf(buf + len, buf_len - len, "buf_avail = %u\n",
htt_stats_buf->buf_avail);
len += scnprintf(buf + len, buf_len - len, "num_users = %u\n\n",
htt_stats_buf->num_users);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_sfm_cmn_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_sfm_cmn_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CMN_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
len += scnprintf(buf + len, buf_len - len, "buf_total = %u\n",
htt_stats_buf->buf_total);
len += scnprintf(buf + len, buf_len - len, "mem_empty = %u\n",
htt_stats_buf->mem_empty);
len += scnprintf(buf + len, buf_len - len, "deallocate_bufs = %u\n",
htt_stats_buf->deallocate_bufs);
len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n",
htt_stats_buf->num_records);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_sring_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_sring_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_SRING_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
FIELD_GET(HTT_SRING_STATS_MAC_ID,
htt_stats_buf->mac_id__ring_id__arena__ep));
len += scnprintf(buf + len, buf_len - len, "ring_id = %lu\n",
FIELD_GET(HTT_SRING_STATS_RING_ID,
htt_stats_buf->mac_id__ring_id__arena__ep));
len += scnprintf(buf + len, buf_len - len, "arena = %lu\n",
FIELD_GET(HTT_SRING_STATS_ARENA,
htt_stats_buf->mac_id__ring_id__arena__ep));
len += scnprintf(buf + len, buf_len - len, "ep = %lu\n",
FIELD_GET(HTT_SRING_STATS_EP,
htt_stats_buf->mac_id__ring_id__arena__ep));
len += scnprintf(buf + len, buf_len - len, "base_addr_lsb = 0x%x\n",
htt_stats_buf->base_addr_lsb);
len += scnprintf(buf + len, buf_len - len, "base_addr_msb = 0x%x\n",
htt_stats_buf->base_addr_msb);
len += scnprintf(buf + len, buf_len - len, "ring_size = %u\n",
htt_stats_buf->ring_size);
len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n",
htt_stats_buf->elem_size);
len += scnprintf(buf + len, buf_len - len, "num_avail_words = %lu\n",
FIELD_GET(HTT_SRING_STATS_NUM_AVAIL_WORDS,
htt_stats_buf->num_avail_words__num_valid_words));
len += scnprintf(buf + len, buf_len - len, "num_valid_words = %lu\n",
FIELD_GET(HTT_SRING_STATS_NUM_VALID_WORDS,
htt_stats_buf->num_avail_words__num_valid_words));
len += scnprintf(buf + len, buf_len - len, "head_ptr = %lu\n",
FIELD_GET(HTT_SRING_STATS_HEAD_PTR,
htt_stats_buf->head_ptr__tail_ptr));
len += scnprintf(buf + len, buf_len - len, "tail_ptr = %lu\n",
FIELD_GET(HTT_SRING_STATS_TAIL_PTR,
htt_stats_buf->head_ptr__tail_ptr));
len += scnprintf(buf + len, buf_len - len, "consumer_empty = %lu\n",
FIELD_GET(HTT_SRING_STATS_CONSUMER_EMPTY,
htt_stats_buf->consumer_empty__producer_full));
len += scnprintf(buf + len, buf_len - len, "producer_full = %lu\n",
FIELD_GET(HTT_SRING_STATS_PRODUCER_FULL,
htt_stats_buf->consumer_empty__producer_full));
len += scnprintf(buf + len, buf_len - len, "prefetch_count = %lu\n",
FIELD_GET(HTT_SRING_STATS_PREFETCH_COUNT,
htt_stats_buf->prefetch_count__internal_tail_ptr));
len += scnprintf(buf + len, buf_len - len, "internal_tail_ptr = %lu\n\n",
FIELD_GET(HTT_SRING_STATS_INTERNAL_TAIL_PTR,
htt_stats_buf->prefetch_count__internal_tail_ptr));
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_sring_cmn_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_sring_cmn_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_SRING_CMN_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n",
htt_stats_buf->num_records);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_tx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u8 j;
len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_RATE_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n",
htt_stats_buf->tx_ldpc);
len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc = %u\n",
htt_stats_buf->ac_mu_mimo_tx_ldpc);
len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u\n",
htt_stats_buf->ax_mu_mimo_tx_ldpc);
len += scnprintf(buf + len, buf_len - len, "ofdma_tx_ldpc = %u\n",
htt_stats_buf->ofdma_tx_ldpc);
len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
htt_stats_buf->rts_cnt);
len += scnprintf(buf + len, buf_len - len, "rts_success = %u\n",
htt_stats_buf->rts_success);
len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n",
htt_stats_buf->ack_rssi);
len += scnprintf(buf + len, buf_len - len,
"Legacy CCK Rates: 1 Mbps: %u, 2 Mbps: %u, 5.5 Mbps: %u, 11 Mbps: %u\n",
htt_stats_buf->tx_legacy_cck_rate[0],
htt_stats_buf->tx_legacy_cck_rate[1],
htt_stats_buf->tx_legacy_cck_rate[2],
htt_stats_buf->tx_legacy_cck_rate[3]);
len += scnprintf(buf + len, buf_len - len,
"Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n"
" 24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u\n",
htt_stats_buf->tx_legacy_ofdm_rate[0],
htt_stats_buf->tx_legacy_ofdm_rate[1],
htt_stats_buf->tx_legacy_ofdm_rate[2],
htt_stats_buf->tx_legacy_ofdm_rate[3],
htt_stats_buf->tx_legacy_ofdm_rate[4],
htt_stats_buf->tx_legacy_ofdm_rate[5],
htt_stats_buf->tx_legacy_ofdm_rate[6],
htt_stats_buf->tx_legacy_ofdm_rate[7]);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mcs, "tx_mcs",
HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_mcs,
"ac_mu_mimo_tx_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_mcs,
"ax_mu_mimo_tx_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_mcs, "ofdma_tx_mcs",
HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_nss, "tx_nss",
HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_nss,
"ac_mu_mimo_tx_nss",
HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_nss,
"ax_mu_mimo_tx_nss",
HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_nss, "ofdma_tx_nss",
HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_bw, "tx_bw",
HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_bw,
"ac_mu_mimo_tx_bw", HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_bw,
"ax_mu_mimo_tx_bw",
HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_bw, "ofdma_tx_bw",
HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_stbc, "tx_stbc",
HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_pream, "tx_pream",
HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
len += scnprintf(buf + len, buf_len - len, "HE LTF: 1x: %u, 2x: %u, 4x: %u\n",
htt_stats_buf->tx_he_ltf[1],
htt_stats_buf->tx_he_ltf[2],
htt_stats_buf->tx_he_ltf[3]);
/* SU GI Stats */
for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
len += scnprintf(buf + len, (buf_len - len),
"tx_gi[%u] = ", j);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_gi[j], NULL,
HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
/* AC MU-MIMO GI Stats */
for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
len += scnprintf(buf + len, (buf_len - len),
"ac_mu_mimo_tx_gi[%u] = ", j);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_gi[j],
NULL, HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
/* AX MU-MIMO GI Stats */
for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
len += scnprintf(buf + len, (buf_len - len),
"ax_mu_mimo_tx_gi[%u] = ", j);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_gi[j],
NULL, HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
/* DL OFDMA GI Stats */
for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
len += scnprintf(buf + len, (buf_len - len),
"ofdma_tx_gi[%u] = ", j);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_gi[j], NULL,
HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_dcm, "tx_dcm",
HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_rx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u8 i, j;
len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
len += scnprintf(buf + len, buf_len - len, "nsts = %u\n",
htt_stats_buf->nsts);
len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n",
htt_stats_buf->rx_ldpc);
len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
htt_stats_buf->rts_cnt);
len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n",
htt_stats_buf->rssi_mgmt);
len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n",
htt_stats_buf->rssi_data);
len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n",
htt_stats_buf->rssi_comb);
len += scnprintf(buf + len, buf_len - len, "rssi_in_dbm = %d\n",
htt_stats_buf->rssi_in_dbm);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_mcs, "rx_mcs",
HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_nss, "rx_nss",
HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_dcm, "rx_dcm",
HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_stbc, "rx_stbc",
HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_bw, "rx_bw",
HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
len += scnprintf(buf + len, buf_len - len, "rx_evm_nss_count = %u\n",
htt_stats_buf->nss_count);
len += scnprintf(buf + len, buf_len - len, "rx_evm_pilot_count = %u\n",
htt_stats_buf->pilot_count);
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
len += scnprintf(buf + len, buf_len - len,
"pilot_evm_db[%u] = ", j);
for (i = 0; i < HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS; i++)
len += scnprintf(buf + len,
buf_len - len,
" %u:%d,",
i,
htt_stats_buf->rx_pilot_evm_db[j][i]);
len += scnprintf(buf + len, buf_len - len, "\n");
}
len += scnprintf(buf + len, buf_len - len,
"pilot_evm_db_mean = ");
for (i = 0; i < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
len += scnprintf(buf + len,
buf_len - len,
" %u:%d,", i,
htt_stats_buf->rx_pilot_evm_db_mean[i]);
len += scnprintf(buf + len, buf_len - len, "\n");
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
len += scnprintf(buf + len, buf_len - len,
"rssi_chain[%u] = ", j);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rssi_chain[j], NULL,
HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
}
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
len += scnprintf(buf + len, buf_len - len,
"rx_gi[%u] = ", j);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_gi[j], NULL,
HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_pream, "rx_pream",
HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
len += scnprintf(buf + len, buf_len - len, "rx_11ax_su_ext = %u\n",
htt_stats_buf->rx_11ax_su_ext);
len += scnprintf(buf + len, buf_len - len, "rx_11ac_mumimo = %u\n",
htt_stats_buf->rx_11ac_mumimo);
len += scnprintf(buf + len, buf_len - len, "rx_11ax_mumimo = %u\n",
htt_stats_buf->rx_11ax_mumimo);
len += scnprintf(buf + len, buf_len - len, "rx_11ax_ofdma = %u\n",
htt_stats_buf->rx_11ax_ofdma);
len += scnprintf(buf + len, buf_len - len, "txbf = %u\n",
htt_stats_buf->txbf);
len += scnprintf(buf + len, buf_len - len, "\nrx_su_ndpa = %u",
htt_stats_buf->rx_su_ndpa);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_su_txbf_mcs,
"rx_11ax_su_txbf_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
"\n");
len += scnprintf(buf + len, buf_len - len, "\nrx_mu_ndpa = %u",
htt_stats_buf->rx_mu_ndpa);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_mu_txbf_mcs,
"rx_11ax_mu_txbf_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
"\n");
len += scnprintf(buf + len, buf_len - len, "\nrx_br_poll = %u",
htt_stats_buf->rx_br_poll);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_legacy_cck_rate,
"rx_legacy_cck_rate",
HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_legacy_ofdm_rate,
"rx_legacy_ofdm_rate",
HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS, "\n");
len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_low = %u\n",
htt_stats_buf->rx_active_dur_us_low);
len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_high = %u\n",
htt_stats_buf->rx_active_dur_us_high);
len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u\n",
htt_stats_buf->rx_11ax_ul_ofdma);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_mcs,
"ul_ofdma_rx_mcs",
HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
len += scnprintf(buf + len, buf_len - len,
"ul_ofdma_rx_gi[%u] = ", j);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_gi[j], NULL,
HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_nss,
"ul_ofdma_rx_nss",
HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_bw, "ul_ofdma_rx_bw",
HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u\n",
htt_stats_buf->ul_ofdma_rx_stbc);
len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u\n",
htt_stats_buf->ul_ofdma_rx_ldpc);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_non_data_ppdu,
"rx_ulofdma_non_data_ppdu",
HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_data_ppdu,
"rx_ulofdma_data_ppdu", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_mpdu_ok,
"rx_ulofdma_mpdu_ok", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_mpdu_fail,
"rx_ulofdma_mpdu_fail", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
len += scnprintf(buf + len, buf_len - len,
"rx_ul_fd_rssi: nss[%u] = ", j);
for (i = 0; i < HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++)
len += scnprintf(buf + len,
buf_len - len,
" %u:%d,",
i, htt_stats_buf->rx_ul_fd_rssi[j][i]);
len += scnprintf(buf + len, buf_len - len, "\n");
}
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_non_data_nusers,
"rx_ulofdma_non_data_nusers", HTT_RX_PDEV_MAX_OFDMA_NUM_USER,
"\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_data_nusers,
"rx_ulofdma_data_nusers", HTT_RX_PDEV_MAX_OFDMA_NUM_USER,
"\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_dl_ofdma_mcs,
"rx_11ax_dl_ofdma_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
"\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_dl_ofdma_ru,
"rx_11ax_dl_ofdma_ru", HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS,
"\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_non_data_ppdu,
"rx_ulmumimo_non_data_ppdu", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
"\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_data_ppdu,
"rx_ulmumimo_data_ppdu", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
"\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_mpdu_ok,
"rx_ulmumimo_mpdu_ok", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
"\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_mpdu_fail,
"rx_ulmumimo_mpdu_fail", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
"\n");
len += scnprintf(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x\n",
htt_stats_buf->per_chain_rssi_pkt_type);
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
len += scnprintf(buf + len, buf_len - len,
"rx_per_chain_rssi_in_dbm[%u] = ", j);
for (i = 0; i < HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++)
len += scnprintf(buf + len,
buf_len - len,
" %u:%d,",
i,
htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]);
len += scnprintf(buf + len, buf_len - len, "\n");
}
len += scnprintf(buf + len, buf_len - len, "\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_rx_soc_fw_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_rx_soc_fw_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_RX_SOC_FW_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "fw_reo_ring_data_msdu = %u\n",
htt_stats_buf->fw_reo_ring_data_msdu);
len += scnprintf(buf + len, buf_len - len, "fw_to_host_data_msdu_bcmc = %u\n",
htt_stats_buf->fw_to_host_data_msdu_bcmc);
len += scnprintf(buf + len, buf_len - len, "fw_to_host_data_msdu_uc = %u\n",
htt_stats_buf->fw_to_host_data_msdu_uc);
len += scnprintf(buf + len, buf_len - len,
"ofld_remote_data_buf_recycle_cnt = %u\n",
htt_stats_buf->ofld_remote_data_buf_recycle_cnt);
len += scnprintf(buf + len, buf_len - len,
"ofld_remote_free_buf_indication_cnt = %u\n",
htt_stats_buf->ofld_remote_free_buf_indication_cnt);
len += scnprintf(buf + len, buf_len - len,
"ofld_buf_to_host_data_msdu_uc = %u\n",
htt_stats_buf->ofld_buf_to_host_data_msdu_uc);
len += scnprintf(buf + len, buf_len - len,
"reo_fw_ring_to_host_data_msdu_uc = %u\n",
htt_stats_buf->reo_fw_ring_to_host_data_msdu_uc);
len += scnprintf(buf + len, buf_len - len, "wbm_sw_ring_reap = %u\n",
htt_stats_buf->wbm_sw_ring_reap);
len += scnprintf(buf + len, buf_len - len, "wbm_forward_to_host_cnt = %u\n",
htt_stats_buf->wbm_forward_to_host_cnt);
len += scnprintf(buf + len, buf_len - len, "wbm_target_recycle_cnt = %u\n",
htt_stats_buf->wbm_target_recycle_cnt);
len += scnprintf(buf + len, buf_len - len,
"target_refill_ring_recycle_cnt = %u\n",
htt_stats_buf->target_refill_ring_recycle_cnt);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_rx_soc_fw_refill_ring_empty_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_rx_soc_fw_refill_ring_empty_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING);
len += scnprintf(buf + len, buf_len - len,
"HTT_RX_SOC_FW_REFILL_RING_EMPTY_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->refill_ring_empty_cnt,
"refill_ring_empty_cnt", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v *htt_stats_buf =
tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_RXDMA_MAX_ERR_CODE);
len += scnprintf(buf + len, buf_len - len,
"HTT_RX_SOC_FW_REFILL_RING_NUM_RXDMA_ERR_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rxdma_err, "rxdma_err",
num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_REO_MAX_ERR_CODE);
len += scnprintf(buf + len, buf_len - len,
"HTT_RX_SOC_FW_REFILL_RING_NUM_REO_ERR_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->reo_err, "reo_err",
num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_rx_reo_debug_stats_tlv_v(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_rx_reo_resource_stats_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_RX_REO_RESOURCE_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "sample_id = %u\n",
htt_stats_buf->sample_id);
len += scnprintf(buf + len, buf_len - len, "total_max = %u\n",
htt_stats_buf->total_max);
len += scnprintf(buf + len, buf_len - len, "total_avg = %u\n",
htt_stats_buf->total_avg);
len += scnprintf(buf + len, buf_len - len, "total_sample = %u\n",
htt_stats_buf->total_sample);
len += scnprintf(buf + len, buf_len - len, "non_zeros_avg = %u\n",
htt_stats_buf->non_zeros_avg);
len += scnprintf(buf + len, buf_len - len, "non_zeros_sample = %u\n",
htt_stats_buf->non_zeros_sample);
len += scnprintf(buf + len, buf_len - len, "last_non_zeros_max = %u\n",
htt_stats_buf->last_non_zeros_max);
len += scnprintf(buf + len, buf_len - len, "last_non_zeros_min %u\n",
htt_stats_buf->last_non_zeros_min);
len += scnprintf(buf + len, buf_len - len, "last_non_zeros_avg %u\n",
htt_stats_buf->last_non_zeros_avg);
len += scnprintf(buf + len, buf_len - len, "last_non_zeros_sample %u\n\n",
htt_stats_buf->last_non_zeros_sample);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_rx_soc_fw_refill_ring_num_refill_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING);
len += scnprintf(buf + len, buf_len - len,
"HTT_RX_SOC_FW_REFILL_RING_NUM_REFILL_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->refill_ring_num_refill,
"refill_ring_num_refill", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_rx_pdev_fw_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_rx_pdev_fw_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
len += scnprintf(buf + len, buf_len - len, "ppdu_recvd = %u\n",
htt_stats_buf->ppdu_recvd);
len += scnprintf(buf + len, buf_len - len, "mpdu_cnt_fcs_ok = %u\n",
htt_stats_buf->mpdu_cnt_fcs_ok);
len += scnprintf(buf + len, buf_len - len, "mpdu_cnt_fcs_err = %u\n",
htt_stats_buf->mpdu_cnt_fcs_err);
len += scnprintf(buf + len, buf_len - len, "tcp_msdu_cnt = %u\n",
htt_stats_buf->tcp_msdu_cnt);
len += scnprintf(buf + len, buf_len - len, "tcp_ack_msdu_cnt = %u\n",
htt_stats_buf->tcp_ack_msdu_cnt);
len += scnprintf(buf + len, buf_len - len, "udp_msdu_cnt = %u\n",
htt_stats_buf->udp_msdu_cnt);
len += scnprintf(buf + len, buf_len - len, "other_msdu_cnt = %u\n",
htt_stats_buf->other_msdu_cnt);
len += scnprintf(buf + len, buf_len - len, "fw_ring_mpdu_ind = %u\n",
htt_stats_buf->fw_ring_mpdu_ind);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_mgmt_subtype,
"fw_ring_mgmt_subtype", HTT_STATS_SUBTYPE_MAX, "\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_ctrl_subtype,
"fw_ring_ctrl_subtype", HTT_STATS_SUBTYPE_MAX, "\n");
len += scnprintf(buf + len, buf_len - len, "fw_ring_mcast_data_msdu = %u\n",
htt_stats_buf->fw_ring_mcast_data_msdu);
len += scnprintf(buf + len, buf_len - len, "fw_ring_bcast_data_msdu = %u\n",
htt_stats_buf->fw_ring_bcast_data_msdu);
len += scnprintf(buf + len, buf_len - len, "fw_ring_ucast_data_msdu = %u\n",
htt_stats_buf->fw_ring_ucast_data_msdu);
len += scnprintf(buf + len, buf_len - len, "fw_ring_null_data_msdu = %u\n",
htt_stats_buf->fw_ring_null_data_msdu);
len += scnprintf(buf + len, buf_len - len, "fw_ring_mpdu_drop = %u\n",
htt_stats_buf->fw_ring_mpdu_drop);
len += scnprintf(buf + len, buf_len - len, "ofld_local_data_ind_cnt = %u\n",
htt_stats_buf->ofld_local_data_ind_cnt);
len += scnprintf(buf + len, buf_len - len,
"ofld_local_data_buf_recycle_cnt = %u\n",
htt_stats_buf->ofld_local_data_buf_recycle_cnt);
len += scnprintf(buf + len, buf_len - len, "drx_local_data_ind_cnt = %u\n",
htt_stats_buf->drx_local_data_ind_cnt);
len += scnprintf(buf + len, buf_len - len,
"drx_local_data_buf_recycle_cnt = %u\n",
htt_stats_buf->drx_local_data_buf_recycle_cnt);
len += scnprintf(buf + len, buf_len - len, "local_nondata_ind_cnt = %u\n",
htt_stats_buf->local_nondata_ind_cnt);
len += scnprintf(buf + len, buf_len - len, "local_nondata_buf_recycle_cnt = %u\n",
htt_stats_buf->local_nondata_buf_recycle_cnt);
len += scnprintf(buf + len, buf_len - len, "fw_status_buf_ring_refill_cnt = %u\n",
htt_stats_buf->fw_status_buf_ring_refill_cnt);
len += scnprintf(buf + len, buf_len - len, "fw_status_buf_ring_empty_cnt = %u\n",
htt_stats_buf->fw_status_buf_ring_empty_cnt);
len += scnprintf(buf + len, buf_len - len, "fw_pkt_buf_ring_refill_cnt = %u\n",
htt_stats_buf->fw_pkt_buf_ring_refill_cnt);
len += scnprintf(buf + len, buf_len - len, "fw_pkt_buf_ring_empty_cnt = %u\n",
htt_stats_buf->fw_pkt_buf_ring_empty_cnt);
len += scnprintf(buf + len, buf_len - len, "fw_link_buf_ring_refill_cnt = %u\n",
htt_stats_buf->fw_link_buf_ring_refill_cnt);
len += scnprintf(buf + len, buf_len - len, "fw_link_buf_ring_empty_cnt = %u\n",
htt_stats_buf->fw_link_buf_ring_empty_cnt);
len += scnprintf(buf + len, buf_len - len, "host_pkt_buf_ring_refill_cnt = %u\n",
htt_stats_buf->host_pkt_buf_ring_refill_cnt);
len += scnprintf(buf + len, buf_len - len, "host_pkt_buf_ring_empty_cnt = %u\n",
htt_stats_buf->host_pkt_buf_ring_empty_cnt);
len += scnprintf(buf + len, buf_len - len, "mon_pkt_buf_ring_refill_cnt = %u\n",
htt_stats_buf->mon_pkt_buf_ring_refill_cnt);
len += scnprintf(buf + len, buf_len - len, "mon_pkt_buf_ring_empty_cnt = %u\n",
htt_stats_buf->mon_pkt_buf_ring_empty_cnt);
len += scnprintf(buf + len, buf_len - len,
"mon_status_buf_ring_refill_cnt = %u\n",
htt_stats_buf->mon_status_buf_ring_refill_cnt);
len += scnprintf(buf + len, buf_len - len, "mon_status_buf_ring_empty_cnt = %u\n",
htt_stats_buf->mon_status_buf_ring_empty_cnt);
len += scnprintf(buf + len, buf_len - len, "mon_desc_buf_ring_refill_cnt = %u\n",
htt_stats_buf->mon_desc_buf_ring_refill_cnt);
len += scnprintf(buf + len, buf_len - len, "mon_desc_buf_ring_empty_cnt = %u\n",
htt_stats_buf->mon_desc_buf_ring_empty_cnt);
len += scnprintf(buf + len, buf_len - len, "mon_dest_ring_update_cnt = %u\n",
htt_stats_buf->mon_dest_ring_update_cnt);
len += scnprintf(buf + len, buf_len - len, "mon_dest_ring_full_cnt = %u\n",
htt_stats_buf->mon_dest_ring_full_cnt);
len += scnprintf(buf + len, buf_len - len, "rx_suspend_cnt = %u\n",
htt_stats_buf->rx_suspend_cnt);
len += scnprintf(buf + len, buf_len - len, "rx_suspend_fail_cnt = %u\n",
htt_stats_buf->rx_suspend_fail_cnt);
len += scnprintf(buf + len, buf_len - len, "rx_resume_cnt = %u\n",
htt_stats_buf->rx_resume_cnt);
len += scnprintf(buf + len, buf_len - len, "rx_resume_fail_cnt = %u\n",
htt_stats_buf->rx_resume_fail_cnt);
len += scnprintf(buf + len, buf_len - len, "rx_ring_switch_cnt = %u\n",
htt_stats_buf->rx_ring_switch_cnt);
len += scnprintf(buf + len, buf_len - len, "rx_ring_restore_cnt = %u\n",
htt_stats_buf->rx_ring_restore_cnt);
len += scnprintf(buf + len, buf_len - len, "rx_flush_cnt = %u\n",
htt_stats_buf->rx_flush_cnt);
len += scnprintf(buf + len, buf_len - len, "rx_recovery_reset_cnt = %u\n\n",
htt_stats_buf->rx_recovery_reset_cnt);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
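/* Note on the epilogue shared by these printers: scnprintf() writes at
 * most size - 1 characters plus a terminating NUL and returns what was
 * actually stored, so 'len' cannot run past the buffer; the trailing
 * "if (len >= buf_len)" check is defensive and simply guarantees NUL
 * termination before publishing the new length back to the request.
 */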
static inline void
htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_rx_pdev_fw_ring_mpdu_err_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len,
"HTT_RX_PDEV_FW_RING_MPDU_ERR_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_mpdu_err,
"fw_ring_mpdu_err", HTT_RX_STATS_RXDMA_MAX_ERR, "\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_rx_pdev_fw_mpdu_drop_tlv_v(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_rx_pdev_fw_mpdu_drop_tlv_v *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_FW_DROP_REASON_MAX);
len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_MPDU_DROP_TLV_V:\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_mpdu_drop, "fw_mpdu_drop",
num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_rx_pdev_fw_stats_phy_err_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_rx_pdev_fw_stats_phy_err_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_PHY_ERR_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mac_id__word = %u\n",
htt_stats_buf->mac_id__word);
len += scnprintf(buf + len, buf_len - len, "total_phy_err_nct = %u\n",
htt_stats_buf->total_phy_err_cnt);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->phy_err, "phy_errs",
HTT_STATS_PHY_ERR_MAX, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_pdev_cca_stats_hist_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_pdev_cca_stats_hist_v1_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "\nHTT_PDEV_CCA_STATS_HIST_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n",
htt_stats_buf->chan_num);
len += scnprintf(buf + len, buf_len - len, "num_records = %u\n",
htt_stats_buf->num_records);
len += scnprintf(buf + len, buf_len - len, "valid_cca_counters_bitmap = 0x%x\n",
htt_stats_buf->valid_cca_counters_bitmap);
len += scnprintf(buf + len, buf_len - len, "collection_interval = %u\n\n",
htt_stats_buf->collection_interval);
len += scnprintf(buf + len, buf_len - len,
"HTT_PDEV_STATS_CCA_COUNTERS_TLV:(in usec)\n");
len += scnprintf(buf + len, buf_len - len,
"| tx_frame| rx_frame| rx_clear| my_rx_frame| cnt| med_rx_idle| med_tx_idle_global| cca_obss|\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_pdev_stats_cca_counters_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_pdev_stats_cca_counters_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len,
"|%10u| %10u| %10u| %11u| %10u| %11u| %18u| %10u|\n",
htt_stats_buf->tx_frame_usec,
htt_stats_buf->rx_frame_usec,
htt_stats_buf->rx_clear_usec,
htt_stats_buf->my_rx_frame_usec,
htt_stats_buf->usec_cnt,
htt_stats_buf->med_rx_idle_usec,
htt_stats_buf->med_tx_idle_global_usec,
htt_stats_buf->cca_obss_usec);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_hw_stats_whal_tx_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_hw_stats_whal_tx_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
len += scnprintf(buf + len, buf_len - len, "last_unpause_ppdu_id = %u\n",
htt_stats_buf->last_unpause_ppdu_id);
len += scnprintf(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u\n",
htt_stats_buf->hwsch_unpause_wait_tqm_write);
len += scnprintf(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u\n",
htt_stats_buf->hwsch_dummy_tlv_skipped);
len += scnprintf(buf + len, buf_len - len,
"hwsch_misaligned_offset_received = %u\n",
htt_stats_buf->hwsch_misaligned_offset_received);
len += scnprintf(buf + len, buf_len - len, "hwsch_reset_count = %u\n",
htt_stats_buf->hwsch_reset_count);
len += scnprintf(buf + len, buf_len - len, "hwsch_dev_reset_war = %u\n",
htt_stats_buf->hwsch_dev_reset_war);
len += scnprintf(buf + len, buf_len - len, "hwsch_delayed_pause = %u\n",
htt_stats_buf->hwsch_delayed_pause);
len += scnprintf(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u\n",
htt_stats_buf->hwsch_long_delayed_pause);
len += scnprintf(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u\n",
htt_stats_buf->sch_rx_ppdu_no_response);
len += scnprintf(buf + len, buf_len - len, "sch_selfgen_response = %u\n",
htt_stats_buf->sch_selfgen_response);
len += scnprintf(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger= %u\n\n",
htt_stats_buf->sch_rx_sifs_resp_trigger);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_pdev_stats_twt_sessions_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_pdev_stats_twt_sessions_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSIONS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
htt_stats_buf->pdev_id);
len += scnprintf(buf + len, buf_len - len, "num_sessions = %u\n\n",
htt_stats_buf->num_sessions);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_pdev_stats_twt_session_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_pdev_stats_twt_session_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSION_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "vdev_id = %u\n",
htt_stats_buf->vdev_id);
len += scnprintf(buf + len, buf_len - len,
"peer_mac = %02lx:%02lx:%02lx:%02lx:%02lx:%02lx\n",
FIELD_GET(HTT_MAC_ADDR_L32_0,
htt_stats_buf->peer_mac.mac_addr_l32),
FIELD_GET(HTT_MAC_ADDR_L32_1,
htt_stats_buf->peer_mac.mac_addr_l32),
FIELD_GET(HTT_MAC_ADDR_L32_2,
htt_stats_buf->peer_mac.mac_addr_l32),
FIELD_GET(HTT_MAC_ADDR_L32_3,
htt_stats_buf->peer_mac.mac_addr_l32),
FIELD_GET(HTT_MAC_ADDR_H16_0,
htt_stats_buf->peer_mac.mac_addr_h16),
FIELD_GET(HTT_MAC_ADDR_H16_1,
htt_stats_buf->peer_mac.mac_addr_h16));
len += scnprintf(buf + len, buf_len - len, "flow_id_flags = %u\n",
htt_stats_buf->flow_id_flags);
len += scnprintf(buf + len, buf_len - len, "dialog_id = %u\n",
htt_stats_buf->dialog_id);
len += scnprintf(buf + len, buf_len - len, "wake_dura_us = %u\n",
htt_stats_buf->wake_dura_us);
len += scnprintf(buf + len, buf_len - len, "wake_intvl_us = %u\n",
htt_stats_buf->wake_intvl_us);
len += scnprintf(buf + len, buf_len - len, "sp_offset_us = %u\n\n",
htt_stats_buf->sp_offset_us);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void
htt_print_pdev_obss_pd_stats_tlv_v(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_pdev_obss_pd_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "OBSS Tx success PPDU = %u\n",
htt_stats_buf->num_obss_tx_ppdu_success);
len += scnprintf(buf + len, buf_len - len, "OBSS Tx failures PPDU = %u\n",
htt_stats_buf->num_obss_tx_ppdu_failure);
len += scnprintf(buf + len, buf_len - len, "Non-SRG Opportunities = %u\n",
htt_stats_buf->num_non_srg_opportunities);
len += scnprintf(buf + len, buf_len - len, "Non-SRG tried PPDU = %u\n",
htt_stats_buf->num_non_srg_ppdu_tried);
len += scnprintf(buf + len, buf_len - len, "Non-SRG success PPDU = %u\n",
htt_stats_buf->num_non_srg_ppdu_success);
len += scnprintf(buf + len, buf_len - len, "SRG Opportunities = %u\n",
htt_stats_buf->num_srg_opportunities);
len += scnprintf(buf + len, buf_len - len, "SRG tried PPDU = %u\n",
htt_stats_buf->num_srg_ppdu_tried);
len += scnprintf(buf + len, buf_len - len, "SRG success PPDU = %u\n\n",
htt_stats_buf->num_srg_ppdu_success);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
buf[len] = 0;
stats_req->buf_len = len;
}
static inline void htt_print_backpressure_stats_tlv_v(const u32 *tag_buf,
u8 *data)
{
struct debug_htt_stats_req *stats_req =
(struct debug_htt_stats_req *)data;
struct htt_ring_backpressure_stats_tlv *htt_stats_buf =
(struct htt_ring_backpressure_stats_tlv *)tag_buf;
int i;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
htt_stats_buf->pdev_id);
len += scnprintf(buf + len, buf_len - len, "current_head_idx = %u\n",
htt_stats_buf->current_head_idx);
len += scnprintf(buf + len, buf_len - len, "current_tail_idx = %u\n",
htt_stats_buf->current_tail_idx);
len += scnprintf(buf + len, buf_len - len, "num_htt_msgs_sent = %u\n",
htt_stats_buf->num_htt_msgs_sent);
len += scnprintf(buf + len, buf_len - len,
"backpressure_time_ms = %u\n",
htt_stats_buf->backpressure_time_ms);
for (i = 0; i < 5; i++)
len += scnprintf(buf + len, buf_len - len,
"backpressure_hist_%u = %u\n",
i + 1, htt_stats_buf->backpressure_hist[i]);
len += scnprintf(buf + len, buf_len - len,
"============================\n");
if (len >= buf_len) {
buf[buf_len - 1] = 0;
stats_req->buf_len = buf_len - 1;
} else {
buf[len] = 0;
stats_req->buf_len = len;
}
}
static inline
void htt_print_pdev_tx_rate_txbf_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_pdev_txrate_txbf_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
int i;
len += scnprintf(buf + len, buf_len - len,
"HTT_STATS_PDEV_TX_RATE_TXBF_STATS:\n");
len += scnprintf(buf + len, buf_len - len, "tx_ol_mcs = ");
for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++)
len += scnprintf(buf + len, buf_len - len,
"%d:%u,", i, htt_stats_buf->tx_su_ol_mcs[i]);
len--;
len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_mcs = ");
for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++)
len += scnprintf(buf + len, buf_len - len,
"%d:%u,", i, htt_stats_buf->tx_su_ibf_mcs[i]);
len--;
len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_mcs =");
for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++)
len += scnprintf(buf + len, buf_len - len,
"%d:%u,", i, htt_stats_buf->tx_su_txbf_mcs[i]);
len--;
len += scnprintf(buf + len, buf_len - len, "\ntx_ol_nss = ");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
len += scnprintf(buf + len, buf_len - len,
"%d:%u,", i, htt_stats_buf->tx_su_ol_nss[i]);
len--;
len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_nss = ");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
len += scnprintf(buf + len, buf_len - len,
"%d:%u,", i, htt_stats_buf->tx_su_ibf_nss[i]);
len--;
len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_nss = ");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
len += scnprintf(buf + len, buf_len - len,
"%d:%u,", i, htt_stats_buf->tx_su_txbf_nss[i]);
len--;
len += scnprintf(buf + len, buf_len - len, "\ntx_ol_bw = ");
for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++)
len += scnprintf(buf + len, buf_len - len,
"%d:%u,", i, htt_stats_buf->tx_su_ol_bw[i]);
len--;
len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_bw = ");
for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++)
len += scnprintf(buf + len, buf_len - len,
"%d:%u,", i, htt_stats_buf->tx_su_ibf_bw[i]);
len--;
len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_bw = ");
for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++)
len += scnprintf(buf + len, buf_len - len,
"%d:%u,", i, htt_stats_buf->tx_su_txbf_bw[i]);
len--;
len += scnprintf(buf + len, buf_len - len, "\n");
stats_req->buf_len = len;
}
static inline
void htt_print_txbf_ofdma_ndpa_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_txbf_ofdma_ndpa_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
int i;
len += scnprintf(buf + len, buf_len - len,
"HTT_TXBF_OFDMA_NDPA_STATS_TLV:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_ndpa_queued_user%d = %u\n",
i, htt_stats_buf->ax_ofdma_ndpa_queued[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_ndpa_tried_user%d = %u\n",
i, htt_stats_buf->ax_ofdma_ndpa_tried[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_ndpa_flushed_user%d = %u\n",
i, htt_stats_buf->ax_ofdma_ndpa_flushed[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_ndpa_err_user%d = %u\n",
i, htt_stats_buf->ax_ofdma_ndpa_err[i]);
len += scnprintf(buf + len, buf_len - len, "\n");
}
stats_req->buf_len = len;
}
static inline
void htt_print_txbf_ofdma_ndp_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_txbf_ofdma_ndp_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
int i;
len += scnprintf(buf + len, buf_len - len,
"HTT_TXBF_OFDMA_NDP_STATS_TLV:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_ndp_queued_user%d = %u\n",
i, htt_stats_buf->ax_ofdma_ndp_queued[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_ndp_tried_user%d = %u\n",
i, htt_stats_buf->ax_ofdma_ndp_tried[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_ndp_flushed_user%d = %u\n",
i, htt_stats_buf->ax_ofdma_ndp_flushed[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_ndp_err_user%d = %u\n",
i, htt_stats_buf->ax_ofdma_ndp_err[i]);
len += scnprintf(buf + len, buf_len - len, "\n");
}
stats_req->buf_len = len;
}
static inline
void htt_print_txbf_ofdma_brp_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_txbf_ofdma_brp_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
int i;
len += scnprintf(buf + len, buf_len - len,
"HTT_TXBF_OFDMA_BRP_STATS_TLV:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_brpoll_queued_user%d = %u\n",
i, htt_stats_buf->ax_ofdma_brpoll_queued[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_brpoll_tried_user%d = %u\n",
i, htt_stats_buf->ax_ofdma_brpoll_tried[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_brpoll_flushed_user%d = %u\n",
i, htt_stats_buf->ax_ofdma_brpoll_flushed[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_brp_err_user%d = %u\n",
i, htt_stats_buf->ax_ofdma_brp_err[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_brp_err_num_cbf_rcvd_user%d = %u\n",
i, htt_stats_buf->ax_ofdma_brp_err_num_cbf_rcvd[i]);
len += scnprintf(buf + len, buf_len - len, "\n");
}
stats_req->buf_len = len;
}
static inline
void htt_print_txbf_ofdma_steer_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_txbf_ofdma_steer_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
int i;
len += scnprintf(buf + len, buf_len - len,
"HTT_TXBF_OFDMA_STEER_STATS_TLV:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_num_ppdu_steer_user%d = %u\n",
i, htt_stats_buf->ax_ofdma_num_ppdu_steer[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_num_ppdu_ol_user%d = %u\n",
i, htt_stats_buf->ax_ofdma_num_ppdu_ol[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_num_usrs_prefetch_user%d = %u\n",
i, htt_stats_buf->ax_ofdma_num_usrs_prefetch[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_num_usrs_sound_user%d = %u\n",
i, htt_stats_buf->ax_ofdma_num_usrs_sound[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_num_usrs_force_sound_user%d = %u\n",
i, htt_stats_buf->ax_ofdma_num_usrs_force_sound[i]);
len += scnprintf(buf + len, buf_len - len, "\n");
}
stats_req->buf_len = len;
}
static inline
void htt_print_phy_counters_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_phy_counters_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
int i;
len += scnprintf(buf + len, buf_len - len, "HTT_PHY_COUNTERS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "rx_ofdma_timing_err_cnt = %u\n",
htt_stats_buf->rx_ofdma_timing_err_cnt);
len += scnprintf(buf + len, buf_len - len, "rx_cck_fail_cnt = %u\n",
htt_stats_buf->rx_cck_fail_cnt);
len += scnprintf(buf + len, buf_len - len, "mactx_abort_cnt = %u\n",
htt_stats_buf->mactx_abort_cnt);
len += scnprintf(buf + len, buf_len - len, "macrx_abort_cnt = %u\n",
htt_stats_buf->macrx_abort_cnt);
len += scnprintf(buf + len, buf_len - len, "phytx_abort_cnt = %u\n",
htt_stats_buf->phytx_abort_cnt);
len += scnprintf(buf + len, buf_len - len, "phyrx_abort_cnt = %u\n",
htt_stats_buf->phyrx_abort_cnt);
len += scnprintf(buf + len, buf_len - len, "phyrx_defer_abort_cnt = %u\n",
htt_stats_buf->phyrx_defer_abort_cnt);
len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_lstf_event_cnt = %u\n",
htt_stats_buf->rx_gain_adj_lstf_event_cnt);
len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_non_legacy_cnt = %u\n",
htt_stats_buf->rx_gain_adj_non_legacy_cnt);
for (i = 0; i < HTT_MAX_RX_PKT_CNT; i++)
len += scnprintf(buf + len, buf_len - len, "rx_pkt_cnt[%d] = %u\n",
i, htt_stats_buf->rx_pkt_cnt[i]);
for (i = 0; i < HTT_MAX_RX_PKT_CRC_PASS_CNT; i++)
len += scnprintf(buf + len, buf_len - len,
"rx_pkt_crc_pass_cnt[%d] = %u\n",
i, htt_stats_buf->rx_pkt_crc_pass_cnt[i]);
for (i = 0; i < HTT_MAX_PER_BLK_ERR_CNT; i++)
len += scnprintf(buf + len, buf_len - len,
"per_blk_err_cnt[%d] = %u\n",
i, htt_stats_buf->per_blk_err_cnt[i]);
for (i = 0; i < HTT_MAX_RX_OTA_ERR_CNT; i++)
len += scnprintf(buf + len, buf_len - len,
"rx_ota_err_cnt[%d] = %u\n",
i, htt_stats_buf->rx_ota_err_cnt[i]);
stats_req->buf_len = len;
}
static inline
void htt_print_phy_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_phy_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
int i;
len += scnprintf(buf + len, buf_len - len, "HTT_PHY_STATS_TLV:\n");
for (i = 0; i < HTT_STATS_MAX_CHAINS; i++)
len += scnprintf(buf + len, buf_len - len, "nf_chain[%d] = %d\n",
i, htt_stats_buf->nf_chain[i]);
len += scnprintf(buf + len, buf_len - len, "false_radar_cnt = %u\n",
htt_stats_buf->false_radar_cnt);
len += scnprintf(buf + len, buf_len - len, "radar_cs_cnt = %u\n",
htt_stats_buf->radar_cs_cnt);
len += scnprintf(buf + len, buf_len - len, "ani_level = %d\n",
htt_stats_buf->ani_level);
len += scnprintf(buf + len, buf_len - len, "fw_run_time = %u\n",
htt_stats_buf->fw_run_time);
stats_req->buf_len = len;
}
static inline void
htt_print_phy_reset_counters_tlv(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_phy_reset_counters_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
if (tag_len < sizeof(*htt_stats_buf))
return;
len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_COUNTERS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
htt_stats_buf->pdev_id);
len += scnprintf(buf + len, buf_len - len, "cf_active_low_fail_cnt = %u\n",
htt_stats_buf->cf_active_low_fail_cnt);
len += scnprintf(buf + len, buf_len - len, "cf_active_low_pass_cnt = %u\n",
htt_stats_buf->cf_active_low_pass_cnt);
len += scnprintf(buf + len, buf_len - len, "phy_off_through_vreg_cnt = %u\n",
htt_stats_buf->phy_off_through_vreg_cnt);
len += scnprintf(buf + len, buf_len - len, "force_calibration_cnt = %u\n",
htt_stats_buf->force_calibration_cnt);
len += scnprintf(buf + len, buf_len - len, "rf_mode_switch_phy_off_cnt = %u\n",
htt_stats_buf->rf_mode_switch_phy_off_cnt);
stats_req->buf_len = len;
}
static inline void
htt_print_phy_reset_stats_tlv(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_phy_reset_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
if (tag_len < sizeof(*htt_stats_buf))
return;
len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
htt_stats_buf->pdev_id);
len += scnprintf(buf + len, buf_len - len, "chan_mhz = %u\n",
htt_stats_buf->chan_mhz);
len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq1 = %u\n",
htt_stats_buf->chan_band_center_freq1);
len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq2 = %u\n",
htt_stats_buf->chan_band_center_freq2);
len += scnprintf(buf + len, buf_len - len, "chan_phy_mode = %u\n",
htt_stats_buf->chan_phy_mode);
len += scnprintf(buf + len, buf_len - len, "chan_flags = 0x%0x\n",
htt_stats_buf->chan_flags);
len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n",
htt_stats_buf->chan_num);
len += scnprintf(buf + len, buf_len - len, "reset_cause = 0x%0x\n",
htt_stats_buf->reset_cause);
len += scnprintf(buf + len, buf_len - len, "prev_reset_cause = 0x%0x\n",
htt_stats_buf->prev_reset_cause);
len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_src = 0x%0x\n",
htt_stats_buf->phy_warm_reset_src);
len += scnprintf(buf + len, buf_len - len, "rx_gain_tbl_mode = %d\n",
htt_stats_buf->rx_gain_tbl_mode);
len += scnprintf(buf + len, buf_len - len, "xbar_val = 0x%0x\n",
htt_stats_buf->xbar_val);
len += scnprintf(buf + len, buf_len - len, "force_calibration = %u\n",
htt_stats_buf->force_calibration);
len += scnprintf(buf + len, buf_len - len, "phyrf_mode = %u\n",
htt_stats_buf->phyrf_mode);
len += scnprintf(buf + len, buf_len - len, "phy_homechan = %u\n",
htt_stats_buf->phy_homechan);
len += scnprintf(buf + len, buf_len - len, "phy_tx_ch_mask = 0x%0x\n",
htt_stats_buf->phy_tx_ch_mask);
len += scnprintf(buf + len, buf_len - len, "phy_rx_ch_mask = 0x%0x\n",
htt_stats_buf->phy_rx_ch_mask);
len += scnprintf(buf + len, buf_len - len, "phybb_ini_mask = 0x%0x\n",
htt_stats_buf->phybb_ini_mask);
len += scnprintf(buf + len, buf_len - len, "phyrf_ini_mask = 0x%0x\n",
htt_stats_buf->phyrf_ini_mask);
len += scnprintf(buf + len, buf_len - len, "phy_dfs_en_mask = 0x%0x\n",
htt_stats_buf->phy_dfs_en_mask);
len += scnprintf(buf + len, buf_len - len, "phy_sscan_en_mask = 0x%0x\n",
htt_stats_buf->phy_sscan_en_mask);
len += scnprintf(buf + len, buf_len - len, "phy_synth_sel_mask = 0x%0x\n",
htt_stats_buf->phy_synth_sel_mask);
len += scnprintf(buf + len, buf_len - len, "phy_adfs_freq = %u\n",
htt_stats_buf->phy_adfs_freq);
len += scnprintf(buf + len, buf_len - len, "cck_fir_settings = 0x%0x\n",
htt_stats_buf->cck_fir_settings);
len += scnprintf(buf + len, buf_len - len, "phy_dyn_pri_chan = %u\n",
htt_stats_buf->phy_dyn_pri_chan);
len += scnprintf(buf + len, buf_len - len, "cca_thresh = 0x%0x\n",
htt_stats_buf->cca_thresh);
len += scnprintf(buf + len, buf_len - len, "dyn_cca_status = %u\n",
htt_stats_buf->dyn_cca_status);
len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_hw = 0x%x\n",
htt_stats_buf->rxdesense_thresh_hw);
len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_sw = 0x%x\n",
htt_stats_buf->rxdesense_thresh_sw);
stats_req->buf_len = len;
}
static inline
void htt_print_peer_ctrl_path_txrx_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
{
const struct htt_peer_ctrl_path_txrx_stats_tlv *htt_stat_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
int i;
const char *mgmt_frm_type[ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1] = {
"assoc_req", "assoc_resp",
"reassoc_req", "reassoc_resp",
"probe_req", "probe_resp",
"timing_advertisement", "reserved",
"beacon", "atim", "disassoc",
"auth", "deauth", "action", "action_no_ack"};
len += scnprintf(buf + len, buf_len - len,
"HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG:\n");
len += scnprintf(buf + len, buf_len - len,
"peer_mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
htt_stat_buf->peer_mac_addr[0], htt_stat_buf->peer_mac_addr[1],
htt_stat_buf->peer_mac_addr[2], htt_stat_buf->peer_mac_addr[3],
htt_stat_buf->peer_mac_addr[4], htt_stat_buf->peer_mac_addr[5]);
len += scnprintf(buf + len, buf_len - len, "peer_tx_mgmt_subtype:\n");
for (i = 0; i < ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1; i++)
len += scnprintf(buf + len, buf_len - len, "%s:%u\n",
mgmt_frm_type[i],
htt_stat_buf->peer_rx_mgmt_subtype[i]);
len += scnprintf(buf + len, buf_len - len, "peer_rx_mgmt_subtype:\n");
for (i = 0; i < ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1; i++)
len += scnprintf(buf + len, buf_len - len, "%s:%u\n",
mgmt_frm_type[i],
htt_stat_buf->peer_rx_mgmt_subtype[i]);
len += scnprintf(buf + len, buf_len - len, "\n");
stats_req->buf_len = len;
}
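/* TLV dispatcher, invoked once per TLV by ath11k_dp_htt_tlv_iter(): routes
 * each tag to its printer, which appends formatted text to stats_req->buf.
 * Unknown tags are silently ignored.
 */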
static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab,
u16 tag, u16 len, const void *tag_buf,
void *user_data)
{
struct debug_htt_stats_req *stats_req = user_data;
switch (tag) {
case HTT_STATS_TX_PDEV_CMN_TAG:
htt_print_tx_pdev_stats_cmn_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_PDEV_UNDERRUN_TAG:
htt_print_tx_pdev_stats_urrn_tlv_v(tag_buf, len, stats_req);
break;
case HTT_STATS_TX_PDEV_SIFS_TAG:
htt_print_tx_pdev_stats_sifs_tlv_v(tag_buf, len, stats_req);
break;
case HTT_STATS_TX_PDEV_FLUSH_TAG:
htt_print_tx_pdev_stats_flush_tlv_v(tag_buf, len, stats_req);
break;
case HTT_STATS_TX_PDEV_PHY_ERR_TAG:
htt_print_tx_pdev_stats_phy_err_tlv_v(tag_buf, len, stats_req);
break;
case HTT_STATS_TX_PDEV_SIFS_HIST_TAG:
htt_print_tx_pdev_stats_sifs_hist_tlv_v(tag_buf, len, stats_req);
break;
case HTT_STATS_TX_PDEV_TX_PPDU_STATS_TAG:
htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(tag_buf, stats_req);
break;
case HTT_STATS_TX_PDEV_TRIED_MPDU_CNT_HIST_TAG:
htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(tag_buf, len,
stats_req);
break;
case HTT_STATS_STRING_TAG:
htt_print_stats_string_tlv(tag_buf, len, stats_req);
break;
case HTT_STATS_TX_HWQ_CMN_TAG:
htt_print_tx_hwq_stats_cmn_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_HWQ_DIFS_LATENCY_TAG:
htt_print_tx_hwq_difs_latency_stats_tlv_v(tag_buf, len, stats_req);
break;
case HTT_STATS_TX_HWQ_CMD_RESULT_TAG:
htt_print_tx_hwq_cmd_result_stats_tlv_v(tag_buf, len, stats_req);
break;
case HTT_STATS_TX_HWQ_CMD_STALL_TAG:
htt_print_tx_hwq_cmd_stall_stats_tlv_v(tag_buf, len, stats_req);
break;
case HTT_STATS_TX_HWQ_FES_STATUS_TAG:
htt_print_tx_hwq_fes_result_stats_tlv_v(tag_buf, len, stats_req);
break;
case HTT_STATS_TX_HWQ_TRIED_MPDU_CNT_HIST_TAG:
htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(tag_buf, len, stats_req);
break;
case HTT_STATS_TX_HWQ_TXOP_USED_CNT_HIST_TAG:
htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(tag_buf, len, stats_req);
break;
case HTT_STATS_TX_TQM_GEN_MPDU_TAG:
htt_print_tx_tqm_gen_mpdu_stats_tlv_v(tag_buf, len, stats_req);
break;
case HTT_STATS_TX_TQM_LIST_MPDU_TAG:
htt_print_tx_tqm_list_mpdu_stats_tlv_v(tag_buf, len, stats_req);
break;
case HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG:
htt_print_tx_tqm_list_mpdu_cnt_tlv_v(tag_buf, len, stats_req);
break;
case HTT_STATS_TX_TQM_CMN_TAG:
htt_print_tx_tqm_cmn_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_TQM_PDEV_TAG:
htt_print_tx_tqm_pdev_stats_tlv_v(tag_buf, stats_req);
break;
case HTT_STATS_TX_TQM_CMDQ_STATUS_TAG:
htt_print_tx_tqm_cmdq_status_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_DE_EAPOL_PACKETS_TAG:
htt_print_tx_de_eapol_packets_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG:
htt_print_tx_de_classify_failed_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_DE_CLASSIFY_STATS_TAG:
htt_print_tx_de_classify_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG:
htt_print_tx_de_classify_status_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG:
htt_print_tx_de_enqueue_packets_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG:
htt_print_tx_de_enqueue_discard_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_DE_FW2WBM_RING_FULL_HIST_TAG:
htt_print_tx_de_fw2wbm_ring_full_hist_tlv(tag_buf, len, stats_req);
break;
case HTT_STATS_TX_DE_CMN_TAG:
htt_print_tx_de_cmn_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_RING_IF_TAG:
htt_print_ring_if_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG:
htt_print_tx_pdev_mu_mimo_sch_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_SFM_CMN_TAG:
htt_print_sfm_cmn_tlv(tag_buf, stats_req);
break;
case HTT_STATS_SRING_STATS_TAG:
htt_print_sring_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_RX_PDEV_FW_STATS_TAG:
htt_print_rx_pdev_fw_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_RX_PDEV_FW_RING_MPDU_ERR_TAG:
htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(tag_buf, stats_req);
break;
case HTT_STATS_RX_PDEV_FW_MPDU_DROP_TAG:
htt_print_rx_pdev_fw_mpdu_drop_tlv_v(tag_buf, len, stats_req);
break;
case HTT_STATS_RX_SOC_FW_STATS_TAG:
htt_print_rx_soc_fw_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_RX_SOC_FW_REFILL_RING_EMPTY_TAG:
htt_print_rx_soc_fw_refill_ring_empty_tlv_v(tag_buf, len, stats_req);
break;
case HTT_STATS_RX_SOC_FW_REFILL_RING_NUM_REFILL_TAG:
htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v(
tag_buf, len, stats_req);
break;
case HTT_STATS_RX_REFILL_RXDMA_ERR_TAG:
htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v(
tag_buf, len, stats_req);
break;
case HTT_STATS_RX_REFILL_REO_ERR_TAG:
htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v(
tag_buf, len, stats_req);
break;
case HTT_STATS_RX_REO_RESOURCE_STATS_TAG:
htt_print_rx_reo_debug_stats_tlv_v(
tag_buf, stats_req);
break;
case HTT_STATS_RX_PDEV_FW_STATS_PHY_ERR_TAG:
htt_print_rx_pdev_fw_stats_phy_err_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_PDEV_RATE_STATS_TAG:
htt_print_tx_pdev_rate_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_RX_PDEV_RATE_STATS_TAG:
htt_print_rx_pdev_rate_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG:
htt_print_tx_pdev_stats_sched_per_txq_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_SCHED_CMN_TAG:
htt_print_stats_tx_sched_cmn_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_PDEV_MPDU_STATS_TAG:
htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG:
htt_print_sched_txq_cmd_posted_tlv_v(tag_buf, len, stats_req);
break;
case HTT_STATS_RING_IF_CMN_TAG:
htt_print_ring_if_cmn_tlv(tag_buf, stats_req);
break;
case HTT_STATS_SFM_CLIENT_USER_TAG:
htt_print_sfm_client_user_tlv_v(tag_buf, len, stats_req);
break;
case HTT_STATS_SFM_CLIENT_TAG:
htt_print_sfm_client_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_TQM_ERROR_STATS_TAG:
htt_print_tx_tqm_error_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG:
htt_print_sched_txq_cmd_reaped_tlv_v(tag_buf, len, stats_req);
break;
case HTT_STATS_SRING_CMN_TAG:
htt_print_sring_cmn_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_SOUNDING_STATS_TAG:
htt_print_tx_sounding_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG:
htt_print_tx_selfgen_ac_err_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_SELFGEN_CMN_STATS_TAG:
htt_print_tx_selfgen_cmn_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_SELFGEN_AC_STATS_TAG:
htt_print_tx_selfgen_ac_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_SELFGEN_AX_STATS_TAG:
htt_print_tx_selfgen_ax_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG:
htt_print_tx_selfgen_ax_err_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_HWQ_MUMIMO_SCH_STATS_TAG:
htt_print_tx_hwq_mu_mimo_sch_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_HWQ_MUMIMO_MPDU_STATS_TAG:
htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_HWQ_MUMIMO_CMN_STATS_TAG:
htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_HW_INTR_MISC_TAG:
htt_print_hw_stats_intr_misc_tlv(tag_buf, stats_req);
break;
case HTT_STATS_HW_WD_TIMEOUT_TAG:
htt_print_hw_stats_wd_timeout_tlv(tag_buf, stats_req);
break;
case HTT_STATS_HW_PDEV_ERRS_TAG:
htt_print_hw_stats_pdev_errs_tlv(tag_buf, stats_req);
break;
case HTT_STATS_COUNTER_NAME_TAG:
htt_print_counter_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_TID_DETAILS_TAG:
htt_print_tx_tid_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_TID_DETAILS_V1_TAG:
htt_print_tx_tid_stats_v1_tlv(tag_buf, stats_req);
break;
case HTT_STATS_RX_TID_DETAILS_TAG:
htt_print_rx_tid_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_PEER_STATS_CMN_TAG:
htt_print_peer_stats_cmn_tlv(tag_buf, stats_req);
break;
case HTT_STATS_PEER_DETAILS_TAG:
htt_print_peer_details_tlv(tag_buf, stats_req);
break;
case HTT_STATS_PEER_MSDU_FLOWQ_TAG:
htt_print_msdu_flow_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_PEER_TX_RATE_STATS_TAG:
htt_print_tx_peer_rate_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_PEER_RX_RATE_STATS_TAG:
htt_print_rx_peer_rate_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TX_DE_COMPL_STATS_TAG:
htt_print_tx_de_compl_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_PDEV_CCA_1SEC_HIST_TAG:
case HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG:
case HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG:
htt_print_pdev_cca_stats_hist_tlv(tag_buf, stats_req);
break;
case HTT_STATS_PDEV_CCA_COUNTERS_TAG:
htt_print_pdev_stats_cca_counters_tlv(tag_buf, stats_req);
break;
case HTT_STATS_WHAL_TX_TAG:
htt_print_hw_stats_whal_tx_tlv(tag_buf, stats_req);
break;
case HTT_STATS_PDEV_TWT_SESSIONS_TAG:
htt_print_pdev_stats_twt_sessions_tlv(tag_buf, stats_req);
break;
case HTT_STATS_PDEV_TWT_SESSION_TAG:
htt_print_pdev_stats_twt_session_tlv(tag_buf, stats_req);
break;
case HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG:
htt_print_sched_txq_sched_order_su_tlv_v(tag_buf, len, stats_req);
break;
case HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG:
htt_print_sched_txq_sched_ineligibility_tlv_v(tag_buf, len, stats_req);
break;
case HTT_STATS_PDEV_OBSS_PD_TAG:
htt_print_pdev_obss_pd_stats_tlv_v(tag_buf, stats_req);
break;
case HTT_STATS_RING_BACKPRESSURE_STATS_TAG:
htt_print_backpressure_stats_tlv_v(tag_buf, user_data);
break;
case HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG:
htt_print_pdev_tx_rate_txbf_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TXBF_OFDMA_NDPA_STATS_TAG:
htt_print_txbf_ofdma_ndpa_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TXBF_OFDMA_NDP_STATS_TAG:
htt_print_txbf_ofdma_ndp_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TXBF_OFDMA_BRP_STATS_TAG:
htt_print_txbf_ofdma_brp_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_TXBF_OFDMA_STEER_STATS_TAG:
htt_print_txbf_ofdma_steer_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_PHY_COUNTERS_TAG:
htt_print_phy_counters_tlv(tag_buf, stats_req);
break;
case HTT_STATS_PHY_STATS_TAG:
htt_print_phy_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_PHY_RESET_COUNTERS_TAG:
htt_print_phy_reset_counters_tlv(tag_buf, len, stats_req);
break;
case HTT_STATS_PHY_RESET_STATS_TAG:
htt_print_phy_reset_stats_tlv(tag_buf, len, stats_req);
break;
case HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG:
htt_print_peer_ctrl_path_txrx_stats_tlv(tag_buf, stats_req);
break;
default:
break;
}
return 0;
}
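/* HTT EXT_STATS event handler: validates the magic value in the upper
 * cookie bits, recovers the pdev id from the lower bits, appends each TLV
 * to the pending request's buffer via the dispatcher above, and completes
 * the waiter once HTT_T2H_EXT_STATS_INFO1_DONE is set.
 */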
void ath11k_debugfs_htt_ext_stats_handler(struct ath11k_base *ab,
struct sk_buff *skb)
{
struct ath11k_htt_extd_stats_msg *msg;
struct debug_htt_stats_req *stats_req;
struct ath11k *ar;
u32 len;
u64 cookie;
int ret;
bool send_completion = false;
u8 pdev_id;
msg = (struct ath11k_htt_extd_stats_msg *)skb->data;
cookie = msg->cookie;
if (FIELD_GET(HTT_STATS_COOKIE_MSB, cookie) != HTT_STATS_MAGIC_VALUE) {
ath11k_warn(ab, "received invalid htt ext stats event\n");
return;
}
pdev_id = FIELD_GET(HTT_STATS_COOKIE_LSB, cookie);
rcu_read_lock();
ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
rcu_read_unlock();
if (!ar) {
ath11k_warn(ab, "failed to get ar for pdev_id %d\n", pdev_id);
return;
}
stats_req = ar->debug.htt_stats.stats_req;
if (!stats_req)
return;
spin_lock_bh(&ar->debug.htt_stats.lock);
stats_req->done = FIELD_GET(HTT_T2H_EXT_STATS_INFO1_DONE, msg->info1);
if (stats_req->done)
send_completion = true;
spin_unlock_bh(&ar->debug.htt_stats.lock);
len = FIELD_GET(HTT_T2H_EXT_STATS_INFO1_LENGTH, msg->info1);
ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
ath11k_dbg_htt_ext_stats_parse,
stats_req);
if (ret)
ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
if (send_completion)
complete(&stats_req->cmpln);
}
static ssize_t ath11k_read_htt_stats_type(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
char buf[32];
size_t len;
len = scnprintf(buf, sizeof(buf), "%u\n", ar->debug.htt_stats.type);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t ath11k_write_htt_stats_type(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
u8 type;
int ret;
ret = kstrtou8_from_user(user_buf, count, 0, &type);
if (ret)
return ret;
if (type >= ATH11K_DBG_HTT_NUM_EXT_STATS)
return -E2BIG;
if (type == ATH11K_DBG_HTT_EXT_STATS_RESET)
return -EPERM;
ar->debug.htt_stats.type = type;
ret = count;
return ret;
}
static const struct file_operations fops_htt_stats_type = {
.read = ath11k_read_htt_stats_type,
.write = ath11k_write_htt_stats_type,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static int ath11k_prep_htt_stats_cfg_params(struct ath11k *ar, u8 type,
const u8 *mac_addr,
struct htt_ext_stats_cfg_params *cfg_params)
{
if (!cfg_params)
return -EINVAL;
switch (type) {
case ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ:
case ATH11K_DBG_HTT_EXT_STATS_TX_MU_HWQ:
cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_HWQS;
break;
case ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED:
cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_TXQS;
break;
case ATH11K_DBG_HTT_EXT_STATS_TQM_CMDQ:
cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_CMDQS;
break;
case ATH11K_DBG_HTT_EXT_STATS_PEER_INFO:
cfg_params->cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
cfg_params->cfg0 |= FIELD_PREP(GENMASK(15, 1),
HTT_PEER_STATS_REQ_MODE_FLUSH_TQM);
cfg_params->cfg1 = HTT_STAT_DEFAULT_PEER_REQ_TYPE;
cfg_params->cfg2 |= FIELD_PREP(GENMASK(7, 0), mac_addr[0]);
cfg_params->cfg2 |= FIELD_PREP(GENMASK(15, 8), mac_addr[1]);
cfg_params->cfg2 |= FIELD_PREP(GENMASK(23, 16), mac_addr[2]);
cfg_params->cfg2 |= FIELD_PREP(GENMASK(31, 24), mac_addr[3]);
cfg_params->cfg3 |= FIELD_PREP(GENMASK(7, 0), mac_addr[4]);
cfg_params->cfg3 |= FIELD_PREP(GENMASK(15, 8), mac_addr[5]);
break;
case ATH11K_DBG_HTT_EXT_STATS_RING_IF_INFO:
case ATH11K_DBG_HTT_EXT_STATS_SRNG_INFO:
cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_RINGS;
break;
case ATH11K_DBG_HTT_EXT_STATS_ACTIVE_PEERS_LIST:
cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ACTIVE_PEERS;
break;
case ATH11K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS:
cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_CCA_CUMULATIVE;
break;
case ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO:
cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS;
break;
case ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS:
cfg_params->cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
cfg_params->cfg1 |= FIELD_PREP(GENMASK(7, 0), mac_addr[0]);
cfg_params->cfg1 |= FIELD_PREP(GENMASK(15, 8), mac_addr[1]);
cfg_params->cfg1 |= FIELD_PREP(GENMASK(23, 16), mac_addr[2]);
cfg_params->cfg1 |= FIELD_PREP(GENMASK(31, 24), mac_addr[3]);
cfg_params->cfg2 |= FIELD_PREP(GENMASK(7, 0), mac_addr[4]);
cfg_params->cfg2 |= FIELD_PREP(GENMASK(15, 8), mac_addr[5]);
break;
default:
break;
}
return 0;
}
int ath11k_debugfs_htt_stats_req(struct ath11k *ar)
{
struct debug_htt_stats_req *stats_req = ar->debug.htt_stats.stats_req;
u8 type = stats_req->type;
u64 cookie = 0;
int ret, pdev_id = ar->pdev->pdev_id;
struct htt_ext_stats_cfg_params cfg_params = { 0 };
init_completion(&stats_req->cmpln);
stats_req->done = false;
stats_req->pdev_id = pdev_id;
cookie = FIELD_PREP(HTT_STATS_COOKIE_MSB, HTT_STATS_MAGIC_VALUE) |
FIELD_PREP(HTT_STATS_COOKIE_LSB, pdev_id);
ret = ath11k_prep_htt_stats_cfg_params(ar, type, stats_req->peer_addr,
&cfg_params);
if (ret) {
ath11k_warn(ar->ab, "failed to set htt stats cfg params: %d\n", ret);
return ret;
}
ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar, type, &cfg_params, cookie);
if (ret) {
ath11k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
return ret;
}
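/* Firmware may answer in several segments; only the final one sets 'done'
 * and completes the waiter. On timeout, re-check 'done' under the lock:
 * if the event handler won the race, loop so the already-signalled
 * completion returns immediately; otherwise mark the request done and
 * bail out with -ETIMEDOUT.
 */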
while (!wait_for_completion_timeout(&stats_req->cmpln, 3 * HZ)) {
spin_lock_bh(&ar->debug.htt_stats.lock);
if (!stats_req->done) {
stats_req->done = true;
spin_unlock_bh(&ar->debug.htt_stats.lock);
ath11k_warn(ar->ab, "stats request timed out\n");
return -ETIMEDOUT;
}
spin_unlock_bh(&ar->debug.htt_stats.lock);
}
return 0;
}
static int ath11k_open_htt_stats(struct inode *inode, struct file *file)
{
struct ath11k *ar = inode->i_private;
struct debug_htt_stats_req *stats_req;
u8 type = ar->debug.htt_stats.type;
int ret;
if (type == ATH11K_DBG_HTT_EXT_STATS_RESET ||
type == ATH11K_DBG_HTT_EXT_STATS_PEER_INFO ||
type == ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS)
return -EPERM;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON) {
ret = -ENETDOWN;
goto err_unlock;
}
if (ar->debug.htt_stats.stats_req) {
ret = -EAGAIN;
goto err_unlock;
}
stats_req = vzalloc(sizeof(*stats_req) + ATH11K_HTT_STATS_BUF_SIZE);
if (!stats_req) {
ret = -ENOMEM;
goto err_unlock;
}
ar->debug.htt_stats.stats_req = stats_req;
stats_req->type = type;
ret = ath11k_debugfs_htt_stats_req(ar);
if (ret < 0)
goto out;
file->private_data = stats_req;
mutex_unlock(&ar->conf_mutex);
return 0;
out:
vfree(stats_req);
ar->debug.htt_stats.stats_req = NULL;
err_unlock:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static int ath11k_release_htt_stats(struct inode *inode, struct file *file)
{
struct ath11k *ar = inode->i_private;
mutex_lock(&ar->conf_mutex);
vfree(file->private_data);
ar->debug.htt_stats.stats_req = NULL;
mutex_unlock(&ar->conf_mutex);
return 0;
}
static ssize_t ath11k_read_htt_stats(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct debug_htt_stats_req *stats_req = file->private_data;
char *buf;
u32 length = 0;
buf = stats_req->buf;
length = min_t(u32, stats_req->buf_len, ATH11K_HTT_STATS_BUF_SIZE);
return simple_read_from_buffer(user_buf, count, ppos, buf, length);
}
static const struct file_operations fops_dump_htt_stats = {
.open = ath11k_open_htt_stats,
.release = ath11k_release_htt_stats,
.read = ath11k_read_htt_stats,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath11k_read_htt_stats_reset(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
char buf[32];
size_t len;
len = scnprintf(buf, sizeof(buf), "%u\n", ar->debug.htt_stats.reset);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t ath11k_write_htt_stats_reset(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
u8 type;
struct htt_ext_stats_cfg_params cfg_params = { 0 };
int ret;
ret = kstrtou8_from_user(user_buf, count, 0, &type);
if (ret)
return ret;
if (type >= ATH11K_DBG_HTT_NUM_EXT_STATS ||
type == ATH11K_DBG_HTT_EXT_STATS_RESET)
return -E2BIG;
mutex_lock(&ar->conf_mutex);
cfg_params.cfg0 = HTT_STAT_DEFAULT_RESET_START_OFFSET;
cfg_params.cfg1 = 1 << (cfg_params.cfg0 + type);
ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar,
ATH11K_DBG_HTT_EXT_STATS_RESET,
&cfg_params,
0ULL);
if (ret) {
ath11k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
mutex_unlock(&ar->conf_mutex);
return ret;
}
ar->debug.htt_stats.reset = type;
mutex_unlock(&ar->conf_mutex);
ret = count;
return ret;
}
static const struct file_operations fops_htt_stats_reset = {
.read = ath11k_read_htt_stats_reset,
.write = ath11k_write_htt_stats_reset,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
void ath11k_debugfs_htt_stats_init(struct ath11k *ar)
{
spin_lock_init(&ar->debug.htt_stats.lock);
debugfs_create_file("htt_stats_type", 0600, ar->debug.debugfs_pdev,
ar, &fops_htt_stats_type);
debugfs_create_file("htt_stats", 0400, ar->debug.debugfs_pdev,
ar, &fops_dump_htt_stats);
debugfs_create_file("htt_stats_reset", 0600, ar->debug.debugfs_pdev,
ar, &fops_htt_stats_reset);
}
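/* Illustrative usage from userspace, assuming the per-pdev ath11k debugfs
 * directory (the exact path depends on the bus and device name):
 *   echo <type> > htt_stats_type    # pick an ATH11K_DBG_HTT_EXT_STATS_* id
 *   cat htt_stats                   # sends the request and dumps the text
 *   echo <type> > htt_stats_reset   # asks firmware to reset those stats
 */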
|
linux-master
|
drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
*/
#include "core.h"
#include "debug.h"
#define ATH11K_DB_MAGIC_VALUE 0xdeadbeaf
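/* Buffers handed to the target are pre-filled with this magic pattern; a
 * completion whose payload still contains it in any word was never written
 * by hardware, and ath11k_dbring_validate_buffer() reports that as -EINVAL.
 */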
int ath11k_dbring_validate_buffer(struct ath11k *ar, void *buffer, u32 size)
{
u32 *temp;
int idx;
size = size >> 2;
for (idx = 0, temp = buffer; idx < size; idx++, temp++) {
if (*temp == ATH11K_DB_MAGIC_VALUE)
return -EINVAL;
}
return 0;
}
static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
void *buffer, u32 size)
{
/* memset32 function fills buffer payload with the ATH11K_DB_MAGIC_VALUE
* and the variable size is expected to be the number of u32 values
* to be stored, not the number of bytes.
*/
size = size / sizeof(u32);
memset32(buffer, ATH11K_DB_MAGIC_VALUE, size);
}
static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
struct ath11k_dbring *ring,
struct ath11k_dbring_element *buff,
enum wmi_direct_buffer_module id)
{
struct ath11k_base *ab = ar->ab;
struct hal_srng *srng;
dma_addr_t paddr;
void *ptr_aligned, *ptr_unaligned, *desc;
int ret;
int buf_id;
u32 cookie;
srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
lockdep_assert_held(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
ptr_unaligned = buff->payload;
ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
ath11k_dbring_fill_magic_value(ar, ptr_aligned, ring->buf_sz);
paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
DMA_FROM_DEVICE);
ret = dma_mapping_error(ab->dev, paddr);
if (ret)
goto err;
spin_lock_bh(&ring->idr_lock);
buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, GFP_ATOMIC);
spin_unlock_bh(&ring->idr_lock);
if (buf_id < 0) {
ret = -ENOBUFS;
goto err_dma_unmap;
}
desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
if (!desc) {
ret = -ENOENT;
goto err_idr_remove;
}
buff->paddr = paddr;
cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, ar->pdev_idx) |
FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);
ath11k_debugfs_add_dbring_entry(ar, id, ATH11K_DBG_DBR_EVENT_REPLENISH, srng);
ath11k_hal_srng_access_end(ab, srng);
return 0;
err_idr_remove:
spin_lock_bh(&ring->idr_lock);
idr_remove(&ring->bufs_idr, buf_id);
spin_unlock_bh(&ring->idr_lock);
err_dma_unmap:
dma_unmap_single(ab->dev, paddr, ring->buf_sz,
DMA_FROM_DEVICE);
err:
ath11k_hal_srng_access_end(ab, srng);
return ret;
}
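/* Payloads are allocated (buf_align - 1) bytes over-sized so PTR_ALIGN()
 * in the replenish path can always carve out an aligned ring->buf_sz
 * region; buff->payload keeps the unaligned base for kfree().
 */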
static int ath11k_dbring_fill_bufs(struct ath11k *ar,
struct ath11k_dbring *ring,
enum wmi_direct_buffer_module id)
{
struct ath11k_dbring_element *buff;
struct hal_srng *srng;
int num_remain, req_entries, num_free;
u32 align;
int size, ret;
srng = &ar->ab->hal.srng_list[ring->refill_srng.ring_id];
spin_lock_bh(&srng->lock);
num_free = ath11k_hal_srng_src_num_free(ar->ab, srng, true);
req_entries = min(num_free, ring->bufs_max);
num_remain = req_entries;
align = ring->buf_align;
size = ring->buf_sz + align - 1;
while (num_remain > 0) {
buff = kzalloc(sizeof(*buff), GFP_ATOMIC);
if (!buff)
break;
buff->payload = kzalloc(size, GFP_ATOMIC);
if (!buff->payload) {
kfree(buff);
break;
}
ret = ath11k_dbring_bufs_replenish(ar, ring, buff, id);
if (ret) {
ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
num_remain, req_entries);
kfree(buff->payload);
kfree(buff);
break;
}
num_remain--;
}
spin_unlock_bh(&srng->lock);
return num_remain;
}
int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
struct ath11k_dbring *ring,
enum wmi_direct_buffer_module id)
{
struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {0};
int ret;
if (id >= WMI_DIRECT_BUF_MAX)
return -EINVAL;
param.pdev_id = DP_SW2HW_MACID(ring->pdev_id);
param.module_id = id;
param.base_paddr_lo = lower_32_bits(ring->refill_srng.paddr);
param.base_paddr_hi = upper_32_bits(ring->refill_srng.paddr);
param.head_idx_paddr_lo = lower_32_bits(ring->hp_addr);
param.head_idx_paddr_hi = upper_32_bits(ring->hp_addr);
param.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr);
param.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr);
param.num_elems = ring->bufs_max;
param.buf_size = ring->buf_sz;
param.num_resp_per_event = ring->num_resp_per_event;
param.event_timeout_ms = ring->event_timeout_ms;
ret = ath11k_wmi_pdev_dma_ring_cfg(ar, &param);
if (ret) {
ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
return ret;
}
return 0;
}
int ath11k_dbring_set_cfg(struct ath11k *ar, struct ath11k_dbring *ring,
u32 num_resp_per_event, u32 event_timeout_ms,
int (*handler)(struct ath11k *,
struct ath11k_dbring_data *))
{
if (WARN_ON(!ring))
return -EINVAL;
ring->num_resp_per_event = num_resp_per_event;
ring->event_timeout_ms = event_timeout_ms;
ring->handler = handler;
return 0;
}
int ath11k_dbring_buf_setup(struct ath11k *ar,
struct ath11k_dbring *ring,
struct ath11k_dbring_cap *db_cap)
{
struct ath11k_base *ab = ar->ab;
struct hal_srng *srng;
int ret;
srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
ring->bufs_max = ring->refill_srng.size /
ath11k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);
ring->buf_sz = db_cap->min_buf_sz;
ring->buf_align = db_cap->min_buf_align;
ring->pdev_id = db_cap->pdev_id;
ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);
ret = ath11k_dbring_fill_bufs(ar, ring, db_cap->id);
return ret;
}
int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring,
int ring_num, int num_entries)
{
int ret;
ret = ath11k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
ring_num, ar->pdev_idx, num_entries);
if (ret < 0) {
ath11k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n",
ret, ring_num);
goto err;
}
return 0;
err:
ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
return ret;
}
int ath11k_dbring_get_cap(struct ath11k_base *ab,
u8 pdev_idx,
enum wmi_direct_buffer_module id,
struct ath11k_dbring_cap *db_cap)
{
int i;
if (!ab->num_db_cap || !ab->db_caps)
return -ENOENT;
if (id >= WMI_DIRECT_BUF_MAX)
return -EINVAL;
for (i = 0; i < ab->num_db_cap; i++) {
if (pdev_idx == ab->db_caps[i].pdev_id &&
id == ab->db_caps[i].id) {
*db_cap = ab->db_caps[i];
return 0;
}
}
return -ENOENT;
}
int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
struct ath11k_dbring_buf_release_event *ev)
{
struct ath11k_dbring *ring;
struct hal_srng *srng;
struct ath11k *ar;
struct ath11k_dbring_element *buff;
struct ath11k_dbring_data handler_data;
struct ath11k_buffer_addr desc;
u8 *vaddr_unalign;
u32 num_entry, num_buff_reaped;
u8 pdev_idx, rbm, module_id;
u32 cookie;
int buf_id;
int size;
dma_addr_t paddr;
int ret = 0;
pdev_idx = ev->fixed.pdev_id;
module_id = ev->fixed.module_id;
if (pdev_idx >= ab->num_radios) {
ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
return -EINVAL;
}
if (ev->fixed.num_buf_release_entry !=
ev->fixed.num_meta_data_entry) {
ath11k_warn(ab, "Buffer entry %d mismatch meta entry %d\n",
ev->fixed.num_buf_release_entry,
ev->fixed.num_meta_data_entry);
return -EINVAL;
}
ar = ab->pdevs[pdev_idx].ar;
rcu_read_lock();
if (!rcu_dereference(ab->pdevs_active[pdev_idx])) {
ret = -EINVAL;
goto rcu_unlock;
}
switch (ev->fixed.module_id) {
case WMI_DIRECT_BUF_SPECTRAL:
ring = ath11k_spectral_get_dbring(ar);
break;
default:
ring = NULL;
ath11k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
ev->fixed.module_id);
break;
}
if (!ring) {
ret = -EINVAL;
goto rcu_unlock;
}
srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
num_entry = ev->fixed.num_buf_release_entry;
size = ring->buf_sz + ring->buf_align - 1;
num_buff_reaped = 0;
spin_lock_bh(&srng->lock);
while (num_buff_reaped < num_entry) {
desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo;
desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi;
handler_data.meta = ev->meta_data[num_buff_reaped];
num_buff_reaped++;
ath11k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
spin_lock_bh(&ring->idr_lock);
buff = idr_find(&ring->bufs_idr, buf_id);
if (!buff) {
spin_unlock_bh(&ring->idr_lock);
continue;
}
idr_remove(&ring->bufs_idr, buf_id);
spin_unlock_bh(&ring->idr_lock);
dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
DMA_FROM_DEVICE);
ath11k_debugfs_add_dbring_entry(ar, module_id,
ATH11K_DBG_DBR_EVENT_RX, srng);
if (ring->handler) {
vaddr_unalign = buff->payload;
handler_data.data = PTR_ALIGN(vaddr_unalign,
ring->buf_align);
handler_data.data_sz = ring->buf_sz;
ring->handler(ar, &handler_data);
}
buff->paddr = 0;
memset(buff->payload, 0, size);
ath11k_dbring_bufs_replenish(ar, ring, buff, module_id);
}
spin_unlock_bh(&srng->lock);
rcu_unlock:
rcu_read_unlock();
return ret;
}
void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
}
void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
struct ath11k_dbring_element *buff;
int buf_id;
spin_lock_bh(&ring->idr_lock);
idr_for_each_entry(&ring->bufs_idr, buff, buf_id) {
idr_remove(&ring->bufs_idr, buf_id);
dma_unmap_single(ar->ab->dev, buff->paddr,
ring->buf_sz, DMA_FROM_DEVICE);
kfree(buff->payload);
kfree(buff);
}
idr_destroy(&ring->bufs_idr);
spin_unlock_bh(&ring->idr_lock);
}
|
linux-master
|
drivers/net/wireless/ath/ath11k/dbring.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "dp_rx.h"
#include "debug.h"
#include "hif.h"
const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
/* CE0: host->target HTC control and raw streams */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 16,
.src_sz_max = 2048,
.dest_nentries = 0,
.send_cb = ath11k_htc_tx_completion_handler,
},
/* CE1: target->host HTT + HTC control */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
.recv_cb = ath11k_htc_rx_completion_handler,
},
/* CE2: target->host WMI */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
.recv_cb = ath11k_htc_rx_completion_handler,
},
/* CE3: host->target WMI (mac0) */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
.send_cb = ath11k_htc_tx_completion_handler,
},
/* CE4: host->target HTT */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 2048,
.src_sz_max = 256,
.dest_nentries = 0,
},
/* CE5: target->host pktlog */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
},
/* CE6: target autonomous hif_memcpy */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
/* CE7: host->target WMI (mac1) */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
.send_cb = ath11k_htc_tx_completion_handler,
},
/* CE8: target autonomous hif_memcpy */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
/* CE9: host->target WMI (mac2) */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
.send_cb = ath11k_htc_tx_completion_handler,
},
/* CE10: target->host HTT */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
.recv_cb = ath11k_htc_rx_completion_handler,
},
/* CE11: Not used */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
};
const struct ce_attr ath11k_host_ce_config_qca6390[] = {
/* CE0: host->target HTC control and raw streams */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 16,
.src_sz_max = 2048,
.dest_nentries = 0,
},
/* CE1: target->host HTT + HTC control */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
.recv_cb = ath11k_htc_rx_completion_handler,
},
/* CE2: target->host WMI */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
.recv_cb = ath11k_htc_rx_completion_handler,
},
/* CE3: host->target WMI (mac0) */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
.send_cb = ath11k_htc_tx_completion_handler,
},
/* CE4: host->target HTT */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 2048,
.src_sz_max = 256,
.dest_nentries = 0,
},
/* CE5: target->host pktlog */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
},
/* CE6: target autonomous hif_memcpy */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
/* CE7: host->target WMI (mac1) */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
.send_cb = ath11k_htc_tx_completion_handler,
},
/* CE8: target autonomous hif_memcpy */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
};
const struct ce_attr ath11k_host_ce_config_qcn9074[] = {
/* CE0: host->target HTC control and raw streams */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 16,
.src_sz_max = 2048,
.dest_nentries = 0,
},
/* CE1: target->host HTT + HTC control */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
.recv_cb = ath11k_htc_rx_completion_handler,
},
/* CE2: target->host WMI */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 32,
.recv_cb = ath11k_htc_rx_completion_handler,
},
/* CE3: host->target WMI (mac0) */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
.send_cb = ath11k_htc_tx_completion_handler,
},
/* CE4: host->target HTT */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 2048,
.src_sz_max = 256,
.dest_nentries = 0,
},
/* CE5: target->host pktlog */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
},
};
static bool ath11k_ce_need_shadow_fix(int ce_id)
{
/* Only CE4 needs the shadow register workaround */
if (ce_id == 4)
return true;
return false;
}
void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab)
{
int i;
if (!ab->hw_params.supports_shadow_regs)
return;
for (i = 0; i < ab->hw_params.ce_count; i++)
if (ath11k_ce_need_shadow_fix(i))
ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
}
static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
struct sk_buff *skb, dma_addr_t paddr)
{
struct ath11k_base *ab = pipe->ab;
struct ath11k_ce_ring *ring = pipe->dest_ring;
struct hal_srng *srng;
unsigned int write_index;
unsigned int nentries_mask = ring->nentries_mask;
u32 *desc;
int ret;
lockdep_assert_held(&ab->ce.ce_lock);
write_index = ring->write_index;
srng = &ab->hal.srng_list[ring->hal_ring_id];
spin_lock_bh(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
ret = -ENOSPC;
goto exit;
}
desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
if (!desc) {
ret = -ENOSPC;
goto exit;
}
ath11k_hal_ce_dst_set_desc(desc, paddr);
ring->skb[write_index] = skb;
write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
ring->write_index = write_index;
pipe->rx_buf_needed--;
ret = 0;
exit:
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return ret;
}
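/* Illustrative sketch (not part of the original file): the enqueue path
* above advances write_index with CE_RING_IDX_INCR. For a power-of-two
* ring size the wrap-around is a mask rather than a modulo; ring_idx_incr()
* below is a hypothetical stand-in showing the idea.
*/
static inline unsigned int ring_idx_incr(unsigned int nentries_mask,
unsigned int idx)
{
/* nentries_mask == nentries - 1, so the AND wraps idx back to 0 */
return (idx + 1) & nentries_mask;
}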
static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
{
struct ath11k_base *ab = pipe->ab;
struct sk_buff *skb;
dma_addr_t paddr;
int ret = 0;
if (!(pipe->dest_ring || pipe->status_ring))
return 0;
spin_lock_bh(&ab->ce.ce_lock);
while (pipe->rx_buf_needed) {
skb = dev_alloc_skb(pipe->buf_sz);
if (!skb) {
ret = -ENOMEM;
goto exit;
}
WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
paddr = dma_map_single(ab->dev, skb->data,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(ab->dev, paddr))) {
ath11k_warn(ab, "failed to dma map ce rx buf\n");
dev_kfree_skb_any(skb);
ret = -EIO;
goto exit;
}
ATH11K_SKB_RXCB(skb)->paddr = paddr;
ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
if (ret) {
ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
dma_unmap_single(ab->dev, paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
goto exit;
}
}
exit:
spin_unlock_bh(&ab->ce.ce_lock);
return ret;
}
static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
struct sk_buff **skb, int *nbytes)
{
struct ath11k_base *ab = pipe->ab;
struct hal_srng *srng;
unsigned int sw_index;
unsigned int nentries_mask;
u32 *desc;
int ret = 0;
spin_lock_bh(&ab->ce.ce_lock);
sw_index = pipe->dest_ring->sw_index;
nentries_mask = pipe->dest_ring->nentries_mask;
srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];
spin_lock_bh(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
if (!desc) {
ret = -EIO;
goto err;
}
*nbytes = ath11k_hal_ce_dst_status_get_length(desc);
if (*nbytes == 0) {
ret = -EIO;
goto err;
}
*skb = pipe->dest_ring->skb[sw_index];
pipe->dest_ring->skb[sw_index] = NULL;
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
pipe->dest_ring->sw_index = sw_index;
pipe->rx_buf_needed++;
err:
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
spin_unlock_bh(&ab->ce.ce_lock);
return ret;
}
static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
{
struct ath11k_base *ab = pipe->ab;
struct sk_buff *skb;
struct sk_buff_head list;
unsigned int nbytes, max_nbytes;
int ret;
__skb_queue_head_init(&list);
while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
max_nbytes = skb->len + skb_tailroom(skb);
dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
max_nbytes, DMA_FROM_DEVICE);
if (unlikely(max_nbytes < nbytes)) {
ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
nbytes, max_nbytes);
dev_kfree_skb_any(skb);
continue;
}
skb_put(skb, nbytes);
__skb_queue_tail(&list, skb);
}
while ((skb = __skb_dequeue(&list))) {
ath11k_dbg(ab, ATH11K_DBG_CE, "rx ce pipe %d len %d\n",
pipe->pipe_num, skb->len);
pipe->recv_cb(ab, skb);
}
ret = ath11k_ce_rx_post_pipe(pipe);
if (ret && ret != -ENOSPC) {
ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
pipe->pipe_num, ret);
mod_timer(&ab->rx_replenish_retry,
jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
}
}
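/* Design note (illustrative, not part of the original file): the receive
* path above is deliberately two-phase. Completed skbs are reaped into a
* private list (the reap itself takes the CE and SRNG locks inside
* ath11k_ce_completed_recv_next()), and the potentially slow recv_cb()
* callbacks only run afterwards with no ring locks held, which keeps lock
* hold times short.
*/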
static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
{
struct ath11k_base *ab = pipe->ab;
struct hal_srng *srng;
unsigned int sw_index;
unsigned int nentries_mask;
struct sk_buff *skb;
u32 *desc;
spin_lock_bh(&ab->ce.ce_lock);
sw_index = pipe->src_ring->sw_index;
nentries_mask = pipe->src_ring->nentries_mask;
srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
spin_lock_bh(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
desc = ath11k_hal_srng_src_reap_next(ab, srng);
if (!desc) {
skb = ERR_PTR(-EIO);
goto err_unlock;
}
skb = pipe->src_ring->skb[sw_index];
pipe->src_ring->skb[sw_index] = NULL;
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
pipe->src_ring->sw_index = sw_index;
err_unlock:
spin_unlock_bh(&srng->lock);
spin_unlock_bh(&ab->ce.ce_lock);
return skb;
}
static void ath11k_ce_tx_process_cb(struct ath11k_ce_pipe *pipe)
{
struct ath11k_base *ab = pipe->ab;
struct sk_buff *skb;
struct sk_buff_head list;
__skb_queue_head_init(&list);
while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
if (!skb)
continue;
dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
DMA_TO_DEVICE);
if ((!pipe->send_cb) || ab->hw_params.credit_flow) {
dev_kfree_skb_any(skb);
continue;
}
__skb_queue_tail(&list, skb);
}
while ((skb = __skb_dequeue(&list))) {
ath11k_dbg(ab, ATH11K_DBG_CE, "tx ce pipe %d len %d\n",
pipe->pipe_num, skb->len);
pipe->send_cb(ab, skb);
}
}
static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_id,
struct hal_srng_params *ring_params)
{
u32 msi_data_start;
u32 msi_data_count, msi_data_idx;
u32 msi_irq_start;
u32 addr_lo;
u32 addr_hi;
int ret;
ret = ath11k_get_user_msi_vector(ab, "CE",
&msi_data_count, &msi_data_start,
&msi_irq_start);
if (ret)
return;
ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
ath11k_get_ce_msi_idx(ab, ce_id, &msi_data_idx);
ring_params->msi_addr = addr_lo;
ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}
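/* Illustrative sketch (not part of the original file): the MSI setup above
* composes a 64-bit address from two 32-bit registers. A hypothetical
* helper showing the same composition in plain C:
*/
static inline unsigned long long msi_addr_combine(unsigned int addr_lo,
unsigned int addr_hi)
{
/* Low word in bits 0..31, high word shifted into bits 32..63 */
return (unsigned long long)addr_lo |
((unsigned long long)addr_hi << 32);
}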
static int ath11k_ce_init_ring(struct ath11k_base *ab,
struct ath11k_ce_ring *ce_ring,
int ce_id, enum hal_ring_type type)
{
struct hal_srng_params params = { 0 };
int ret;
params.ring_base_paddr = ce_ring->base_addr_ce_space;
params.ring_base_vaddr = ce_ring->base_addr_owner_space;
params.num_entries = ce_ring->nentries;
if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
ath11k_ce_srng_msi_ring_params_setup(ab, ce_id, ¶ms);
switch (type) {
case HAL_CE_SRC:
if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
params.intr_batch_cntr_thres_entries = 1;
break;
case HAL_CE_DST:
params.max_buffer_len = ab->hw_params.host_ce_config[ce_id].src_sz_max;
if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
params.intr_timer_thres_us = 1024;
params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
params.low_threshold = ce_ring->nentries - 3;
}
break;
case HAL_CE_DST_STATUS:
if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
params.intr_batch_cntr_thres_entries = 1;
params.intr_timer_thres_us = 0x1000;
}
break;
default:
ath11k_warn(ab, "Invalid CE ring type %d\n", type);
return -EINVAL;
}
/* TODO: Init other params needed by HAL to init the ring */
ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, ¶ms);
if (ret < 0) {
ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
ret, ce_id);
return ret;
}
ce_ring->hal_ring_id = ret;
if (ab->hw_params.supports_shadow_regs &&
ath11k_ce_need_shadow_fix(ce_id))
ath11k_dp_shadow_init_timer(ab, &ab->ce.hp_timer[ce_id],
ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
ce_ring->hal_ring_id);
return 0;
}
static struct ath11k_ce_ring *
ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
{
struct ath11k_ce_ring *ce_ring;
dma_addr_t base_addr;
ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
if (!ce_ring)
return ERR_PTR(-ENOMEM);
ce_ring->nentries = nentries;
ce_ring->nentries_mask = nentries - 1;
/* Legacy platforms that do not support cache-coherent
* DMA are not supported.
*/
ce_ring->base_addr_owner_space_unaligned =
dma_alloc_coherent(ab->dev,
nentries * desc_sz + CE_DESC_RING_ALIGN,
&base_addr, GFP_KERNEL);
if (!ce_ring->base_addr_owner_space_unaligned) {
kfree(ce_ring);
return ERR_PTR(-ENOMEM);
}
ce_ring->base_addr_ce_space_unaligned = base_addr;
ce_ring->base_addr_owner_space = PTR_ALIGN(
ce_ring->base_addr_owner_space_unaligned,
CE_DESC_RING_ALIGN);
ce_ring->base_addr_ce_space = ALIGN(
ce_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
return ce_ring;
}
static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
{
struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
struct ath11k_ce_ring *ring;
int nentries;
int desc_sz;
pipe->attr_flags = attr->flags;
if (attr->src_nentries) {
pipe->send_cb = attr->send_cb;
nentries = roundup_pow_of_two(attr->src_nentries);
desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
if (IS_ERR(ring))
return PTR_ERR(ring);
pipe->src_ring = ring;
}
if (attr->dest_nentries) {
pipe->recv_cb = attr->recv_cb;
nentries = roundup_pow_of_two(attr->dest_nentries);
desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
if (IS_ERR(ring))
return PTR_ERR(ring);
pipe->dest_ring = ring;
desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
if (IS_ERR(ring))
return PTR_ERR(ring);
pipe->status_ring = ring;
}
return 0;
}
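/* Illustrative sketch (not part of the original file): the pipe allocation
* above rounds entry counts with roundup_pow_of_two() so that
* nentries_mask = nentries - 1 works as a wrap mask. A plain-C check for
* that invariant (is_pow2() is a hypothetical helper):
*/
static inline int is_pow2(unsigned int n)
{
/* A power of two has exactly one bit set, so n & (n - 1) clears it */
return n != 0 && (n & (n - 1)) == 0;
}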
void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
{
struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
if (attr->src_nentries)
ath11k_ce_tx_process_cb(pipe);
if (pipe->recv_cb)
ath11k_ce_recv_process_cb(pipe);
}
void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
{
struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
const struct ce_attr *attr = &ab->hw_params.host_ce_config[pipe_id];
if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && attr->src_nentries)
ath11k_ce_tx_process_cb(pipe);
}
EXPORT_SYMBOL(ath11k_ce_per_engine_service);
int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
u16 transfer_id)
{
struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
struct hal_srng *srng;
u32 *desc;
unsigned int write_index, sw_index;
unsigned int nentries_mask;
int ret = 0;
u8 byte_swap_data = 0;
int num_used;
/* Check if some entries could be regained by handling tx completion if
* the CE has interrupts disabled and the number of used entries is
* above the defined usage threshold.
*/
if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
spin_lock_bh(&ab->ce.ce_lock);
write_index = pipe->src_ring->write_index;
sw_index = pipe->src_ring->sw_index;
if (write_index >= sw_index)
num_used = write_index - sw_index;
else
num_used = pipe->src_ring->nentries - sw_index +
write_index;
spin_unlock_bh(&ab->ce.ce_lock);
if (num_used > ATH11K_CE_USAGE_THRESHOLD)
ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
}
if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
return -ESHUTDOWN;
spin_lock_bh(&ab->ce.ce_lock);
write_index = pipe->src_ring->write_index;
nentries_mask = pipe->src_ring->nentries_mask;
srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
spin_lock_bh(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
ath11k_hal_srng_access_end(ab, srng);
ret = -ENOBUFS;
goto err_unlock;
}
desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
if (!desc) {
ath11k_hal_srng_access_end(ab, srng);
ret = -ENOBUFS;
goto err_unlock;
}
if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
byte_swap_data = 1;
ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
skb->len, transfer_id, byte_swap_data);
pipe->src_ring->skb[write_index] = skb;
pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
write_index);
ath11k_hal_srng_access_end(ab, srng);
if (ath11k_ce_need_shadow_fix(pipe_id))
ath11k_dp_shadow_start_timer(ab, srng, &ab->ce.hp_timer[pipe_id]);
spin_unlock_bh(&srng->lock);
spin_unlock_bh(&ab->ce.ce_lock);
return 0;
err_unlock:
spin_unlock_bh(&srng->lock);
spin_unlock_bh(&ab->ce.ce_lock);
return ret;
}
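/* Illustrative sketch (not part of the original file): the CE_ATTR_DIS_INTR
* path above computes ring occupancy from the two indices. The same
* wrap-aware arithmetic as a hypothetical helper:
*/
static inline unsigned int ring_used_entries(unsigned int write_index,
unsigned int sw_index,
unsigned int nentries)
{
if (write_index >= sw_index)
return write_index - sw_index;
/* write_index has wrapped past the end of the ring */
return nentries - sw_index + write_index;
}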
static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
{
struct ath11k_base *ab = pipe->ab;
struct ath11k_ce_ring *ring = pipe->dest_ring;
struct sk_buff *skb;
int i;
if (!(ring && pipe->buf_sz))
return;
for (i = 0; i < ring->nentries; i++) {
skb = ring->skb[i];
if (!skb)
continue;
ring->skb[i] = NULL;
dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
}
static void ath11k_ce_shadow_config(struct ath11k_base *ab)
{
int i;
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ab->hw_params.host_ce_config[i].src_nentries)
ath11k_hal_srng_update_shadow_config(ab,
HAL_CE_SRC, i);
if (ab->hw_params.host_ce_config[i].dest_nentries) {
ath11k_hal_srng_update_shadow_config(ab,
HAL_CE_DST, i);
ath11k_hal_srng_update_shadow_config(ab,
HAL_CE_DST_STATUS, i);
}
}
}
void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
u32 **shadow_cfg, u32 *shadow_cfg_len)
{
if (!ab->hw_params.supports_shadow_regs)
return;
ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
/* shadow is already configured */
if (*shadow_cfg_len)
return;
/* shadow isn't configured yet, configure now.
* non-CE srngs are configured first, then
* all CE srngs.
*/
ath11k_hal_srng_shadow_config(ab);
ath11k_ce_shadow_config(ab);
/* get the shadow configuration */
ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
}
EXPORT_SYMBOL(ath11k_ce_get_shadow_config);
void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
{
struct ath11k_ce_pipe *pipe;
int pipe_num;
ath11k_ce_stop_shadow_timers(ab);
for (pipe_num = 0; pipe_num < ab->hw_params.ce_count; pipe_num++) {
pipe = &ab->ce.ce_pipe[pipe_num];
ath11k_ce_rx_pipe_cleanup(pipe);
/* Clean up any src CEs which have interrupts disabled */
ath11k_ce_poll_send_completed(ab, pipe_num);
/* NOTE: Should we also clean up tx buffer in all pipes? */
}
}
EXPORT_SYMBOL(ath11k_ce_cleanup_pipes);
void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
{
struct ath11k_ce_pipe *pipe;
int i;
int ret;
for (i = 0; i < ab->hw_params.ce_count; i++) {
pipe = &ab->ce.ce_pipe[i];
ret = ath11k_ce_rx_post_pipe(pipe);
if (ret) {
if (ret == -ENOSPC)
continue;
ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
i, ret);
mod_timer(&ab->rx_replenish_retry,
jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
return;
}
}
}
EXPORT_SYMBOL(ath11k_ce_rx_post_buf);
void ath11k_ce_rx_replenish_retry(struct timer_list *t)
{
struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);
ath11k_ce_rx_post_buf(ab);
}
int ath11k_ce_init_pipes(struct ath11k_base *ab)
{
struct ath11k_ce_pipe *pipe;
int i;
int ret;
for (i = 0; i < ab->hw_params.ce_count; i++) {
pipe = &ab->ce.ce_pipe[i];
if (pipe->src_ring) {
ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
HAL_CE_SRC);
if (ret) {
ath11k_warn(ab, "failed to init src ring: %d\n",
ret);
/* Should we clear any partial init */
return ret;
}
pipe->src_ring->write_index = 0;
pipe->src_ring->sw_index = 0;
}
if (pipe->dest_ring) {
ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
HAL_CE_DST);
if (ret) {
ath11k_warn(ab, "failed to init dest ring: %d\n",
ret);
/* Should we clear any partial init */
return ret;
}
pipe->rx_buf_needed = pipe->dest_ring->nentries ?
pipe->dest_ring->nentries - 2 : 0;
pipe->dest_ring->write_index = 0;
pipe->dest_ring->sw_index = 0;
}
if (pipe->status_ring) {
ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
HAL_CE_DST_STATUS);
if (ret) {
ath11k_warn(ab, "failed to init dest status ing: %d\n",
ret);
/* Should we clear any partial init */
return ret;
}
pipe->status_ring->write_index = 0;
pipe->status_ring->sw_index = 0;
}
}
return 0;
}
void ath11k_ce_free_pipes(struct ath11k_base *ab)
{
struct ath11k_ce_pipe *pipe;
struct ath11k_ce_ring *ce_ring;
int desc_sz;
int i;
for (i = 0; i < ab->hw_params.ce_count; i++) {
pipe = &ab->ce.ce_pipe[i];
if (ath11k_ce_need_shadow_fix(i))
ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
if (pipe->src_ring) {
desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
ce_ring = pipe->src_ring;
dma_free_coherent(ab->dev,
pipe->src_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
ce_ring->base_addr_owner_space_unaligned,
ce_ring->base_addr_ce_space_unaligned);
kfree(pipe->src_ring);
pipe->src_ring = NULL;
}
if (pipe->dest_ring) {
desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
ce_ring = pipe->dest_ring;
dma_free_coherent(ab->dev,
pipe->dest_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
ce_ring->base_addr_owner_space_unaligned,
ce_ring->base_addr_ce_space_unaligned);
kfree(pipe->dest_ring);
pipe->dest_ring = NULL;
}
if (pipe->status_ring) {
desc_sz =
ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
ce_ring = pipe->status_ring;
dma_free_coherent(ab->dev,
pipe->status_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
ce_ring->base_addr_owner_space_unaligned,
ce_ring->base_addr_ce_space_unaligned);
kfree(pipe->status_ring);
pipe->status_ring = NULL;
}
}
}
EXPORT_SYMBOL(ath11k_ce_free_pipes);
int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
{
struct ath11k_ce_pipe *pipe;
int i;
int ret;
const struct ce_attr *attr;
spin_lock_init(&ab->ce.ce_lock);
for (i = 0; i < ab->hw_params.ce_count; i++) {
attr = &ab->hw_params.host_ce_config[i];
pipe = &ab->ce.ce_pipe[i];
pipe->pipe_num = i;
pipe->ab = ab;
pipe->buf_sz = attr->src_sz_max;
ret = ath11k_ce_alloc_pipe(ab, i);
if (ret) {
/* Free any partially successful allocation */
ath11k_ce_free_pipes(ab);
return ret;
}
}
return 0;
}
EXPORT_SYMBOL(ath11k_ce_alloc_pipes);
/* On a big-endian host, the Copy Engine byte swap is enabled. When the
* Copy Engine swaps bytes, the host needs to swap them again to get/put
* buffer content in the correct byte order.
*/
void ath11k_ce_byte_swap(void *mem, u32 len)
{
int i;
if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
if (!mem)
return;
for (i = 0; i < (len / 4); i++) {
*(u32 *)mem = swab32(*(u32 *)mem);
mem += 4;
}
}
}
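/* Worked example (illustrative, not part of the original file): swab32()
* reverses byte order within each 32-bit word, so on a big-endian host an
* 8-byte buffer {11 22 33 44 aa bb cc dd} becomes {44 33 22 11 dd cc bb aa}
* after ath11k_ce_byte_swap(mem, 8). Lengths are assumed to be multiples
* of 4; a trailing remainder would be left untouched by the loop above.
*/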
int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id)
{
if (ce_id >= ab->hw_params.ce_count)
return -EINVAL;
return ab->hw_params.host_ce_config[ce_id].flags;
}
EXPORT_SYMBOL(ath11k_ce_get_attr_flags);
|
linux-master
|
drivers/net/wireless/ath/ath11k/ce.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include <linux/rtnetlink.h>
#include "core.h"
#include "debug.h"
/* World regdom to be used in case default regd from fw is unavailable */
#define ATH11K_2GHZ_CH01_11 REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0)
#define ATH11K_5GHZ_5150_5350 REG_RULE(5150 - 10, 5350 + 10, 80, 0, 30,\
NL80211_RRF_NO_IR)
#define ATH11K_5GHZ_5725_5850 REG_RULE(5725 - 10, 5850 + 10, 80, 0, 30,\
NL80211_RRF_NO_IR)
#define ETSI_WEATHER_RADAR_BAND_LOW 5590
#define ETSI_WEATHER_RADAR_BAND_HIGH 5650
#define ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT 600000
static const struct ieee80211_regdomain ath11k_world_regd = {
.n_reg_rules = 3,
.alpha2 = "00",
.reg_rules = {
ATH11K_2GHZ_CH01_11,
ATH11K_5GHZ_5150_5350,
ATH11K_5GHZ_5725_5850,
}
};
static bool ath11k_regdom_changes(struct ath11k *ar, char *alpha2)
{
const struct ieee80211_regdomain *regd;
regd = rcu_dereference_rtnl(ar->hw->wiphy->regd);
/* This can happen during wiphy registration where the previous
* user request is received before we update the regd received
* from firmware.
*/
if (!regd)
return true;
return memcmp(regd->alpha2, alpha2, 2) != 0;
}
static void
ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct wmi_init_country_params init_country_param;
struct wmi_set_current_country_params set_current_param = {};
struct ath11k *ar = hw->priv;
int ret;
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"Regulatory Notification received for %s\n", wiphy_name(wiphy));
/* Currently only general user hints are supported. Cell-based user
* hints are to be handled later.
* Hints from other sources like core and beacons are not expected for
* self-managed wiphys.
*/
if (!(request->initiator == NL80211_REGDOM_SET_BY_USER &&
request->user_reg_hint_type == NL80211_USER_REG_HINT_USER)) {
ath11k_warn(ar->ab, "Unexpected Regulatory event for this wiphy\n");
return;
}
if (!IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS)) {
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"Country Setting is not allowed\n");
return;
}
if (!ath11k_regdom_changes(ar, request->alpha2)) {
ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Country is already set\n");
return;
}
/* Send the country code to the firmware; the firmware replies with a
* WMI_REG_CHAN_LIST_CC event carrying the updated reg info.
*/
if (ar->ab->hw_params.current_cc_support) {
memcpy(&set_current_param.alpha2, request->alpha2, 2);
memcpy(&ar->alpha2, &set_current_param.alpha2, 2);
ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
if (ret)
ath11k_warn(ar->ab,
"failed set current country code: %d\n", ret);
} else {
init_country_param.flags = ALPHA_IS_SET;
memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2);
init_country_param.cc_info.alpha2[2] = 0;
ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param);
if (ret)
ath11k_warn(ar->ab,
"INIT Country code set to fw failed : %d\n", ret);
}
ath11k_mac_11d_scan_stop(ar);
ar->regdom_set_by_user = true;
}
int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
{
struct ieee80211_supported_band **bands;
struct scan_chan_list_params *params;
struct ieee80211_channel *channel;
struct ieee80211_hw *hw = ar->hw;
struct channel_param *ch;
enum nl80211_band band;
int num_channels = 0;
int i, ret, left;
if (wait && ar->state_11d != ATH11K_11D_IDLE) {
left = wait_for_completion_timeout(&ar->completed_11d_scan,
ATH11K_SCAN_TIMEOUT_HZ);
if (!left) {
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"failed to receive 11d scan complete: timed out\n");
ar->state_11d = ATH11K_11D_IDLE;
}
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"11d scan wait left time %d\n", left);
}
if (wait &&
(ar->scan.state == ATH11K_SCAN_STARTING ||
ar->scan.state == ATH11K_SCAN_RUNNING)) {
left = wait_for_completion_timeout(&ar->scan.completed,
ATH11K_SCAN_TIMEOUT_HZ);
if (!left)
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"failed to receive hw scan complete: timed out\n");
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"hw scan wait left time %d\n", left);
}
if (ar->state == ATH11K_STATE_RESTARTING)
return 0;
bands = hw->wiphy->bands;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!bands[band])
continue;
for (i = 0; i < bands[band]->n_channels; i++) {
if (bands[band]->channels[i].flags &
IEEE80211_CHAN_DISABLED)
continue;
num_channels++;
}
}
if (WARN_ON(!num_channels))
return -EINVAL;
params = kzalloc(struct_size(params, ch_param, num_channels),
GFP_KERNEL);
if (!params)
return -ENOMEM;
params->pdev_id = ar->pdev->pdev_id;
params->nallchans = num_channels;
ch = params->ch_param;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!bands[band])
continue;
for (i = 0; i < bands[band]->n_channels; i++) {
channel = &bands[band]->channels[i];
if (channel->flags & IEEE80211_CHAN_DISABLED)
continue;
/* TODO: Set to true/false based on some condition? */
ch->allow_ht = true;
ch->allow_vht = true;
ch->allow_he = true;
ch->dfs_set =
!!(channel->flags & IEEE80211_CHAN_RADAR);
ch->is_chan_passive = !!(channel->flags &
IEEE80211_CHAN_NO_IR);
ch->is_chan_passive |= ch->dfs_set;
ch->mhz = channel->center_freq;
ch->cfreq1 = channel->center_freq;
ch->minpower = 0;
ch->maxpower = channel->max_power * 2;
ch->maxregpower = channel->max_reg_power * 2;
ch->antennamax = channel->max_antenna_gain * 2;
/* TODO: Use appropriate phymodes */
if (channel->band == NL80211_BAND_2GHZ)
ch->phy_mode = MODE_11G;
else
ch->phy_mode = MODE_11A;
if (channel->band == NL80211_BAND_6GHZ &&
cfg80211_channel_is_psc(channel))
ch->psc_channel = true;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"mac channel [%d/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
i, params->nallchans,
ch->mhz, ch->maxpower, ch->maxregpower,
ch->antennamax, ch->phy_mode);
ch++;
/* TODO: use quarter/half rate, cfreq12, dfs_cfreq2
* set_agile, reg_class_idx
*/
}
}
ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
kfree(params);
return ret;
}
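/* Unit note (illustrative, not part of the original file): the '* 2' on
* maxpower/maxregpower/antennamax above suggests the firmware channel
* parameters are expressed in 0.5 dBm / 0.5 dBi steps, e.g. a mac80211
* max_power of 20 dBm would be sent as maxpower = 40. This is an
* assumption inferred from the conversion, not stated in this file.
*/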
static void ath11k_copy_regd(struct ieee80211_regdomain *regd_orig,
struct ieee80211_regdomain *regd_copy)
{
u8 i;
/* The caller should have checked error conditions */
memcpy(regd_copy, regd_orig, sizeof(*regd_orig));
for (i = 0; i < regd_orig->n_reg_rules; i++)
memcpy(®d_copy->reg_rules[i], ®d_orig->reg_rules[i],
sizeof(struct ieee80211_reg_rule));
}
int ath11k_regd_update(struct ath11k *ar)
{
struct ieee80211_regdomain *regd, *regd_copy = NULL;
int ret, regd_len, pdev_id;
struct ath11k_base *ab;
ab = ar->ab;
pdev_id = ar->pdev_idx;
spin_lock_bh(&ab->base_lock);
/* Prefer the latest regd update over default if it's available */
if (ab->new_regd[pdev_id]) {
regd = ab->new_regd[pdev_id];
} else {
/* Apply the regd received during init through
* WMI_REG_CHAN_LIST_CC event. In case of failure to
* receive the regd, initialize with a default world
* regulatory.
*/
if (ab->default_regd[pdev_id]) {
regd = ab->default_regd[pdev_id];
} else {
ath11k_warn(ab,
"failed to receive default regd during init\n");
regd = (struct ieee80211_regdomain *)&ath11k_world_regd;
}
}
if (!regd) {
ret = -EINVAL;
spin_unlock_bh(&ab->base_lock);
goto err;
}
regd_len = sizeof(*regd) + (regd->n_reg_rules *
sizeof(struct ieee80211_reg_rule));
regd_copy = kzalloc(regd_len, GFP_ATOMIC);
if (regd_copy)
ath11k_copy_regd(regd, regd_copy);
spin_unlock_bh(&ab->base_lock);
if (!regd_copy) {
ret = -ENOMEM;
goto err;
}
ret = regulatory_set_wiphy_regd(ar->hw->wiphy, regd_copy);
kfree(regd_copy);
if (ret)
goto err;
if (ar->state == ATH11K_STATE_ON) {
ret = ath11k_reg_update_chan_list(ar, true);
if (ret)
goto err;
}
return 0;
err:
ath11k_warn(ab, "failed to perform regd update : %d\n", ret);
return ret;
}
static enum nl80211_dfs_regions
ath11k_map_fw_dfs_region(enum ath11k_dfs_region dfs_region)
{
switch (dfs_region) {
case ATH11K_DFS_REG_FCC:
case ATH11K_DFS_REG_CN:
return NL80211_DFS_FCC;
case ATH11K_DFS_REG_ETSI:
case ATH11K_DFS_REG_KR:
return NL80211_DFS_ETSI;
case ATH11K_DFS_REG_MKK:
case ATH11K_DFS_REG_MKK_N:
return NL80211_DFS_JP;
default:
return NL80211_DFS_UNSET;
}
}
static u32 ath11k_map_fw_reg_flags(u16 reg_flags)
{
u32 flags = 0;
if (reg_flags & REGULATORY_CHAN_NO_IR)
flags = NL80211_RRF_NO_IR;
if (reg_flags & REGULATORY_CHAN_RADAR)
flags |= NL80211_RRF_DFS;
if (reg_flags & REGULATORY_CHAN_NO_OFDM)
flags |= NL80211_RRF_NO_OFDM;
if (reg_flags & REGULATORY_CHAN_INDOOR_ONLY)
flags |= NL80211_RRF_NO_OUTDOOR;
if (reg_flags & REGULATORY_CHAN_NO_HT40)
flags |= NL80211_RRF_NO_HT40;
if (reg_flags & REGULATORY_CHAN_NO_80MHZ)
flags |= NL80211_RRF_NO_80MHZ;
if (reg_flags & REGULATORY_CHAN_NO_160MHZ)
flags |= NL80211_RRF_NO_160MHZ;
return flags;
}
static bool
ath11k_reg_can_intersect(struct ieee80211_reg_rule *rule1,
struct ieee80211_reg_rule *rule2)
{
u32 start_freq1, end_freq1;
u32 start_freq2, end_freq2;
start_freq1 = rule1->freq_range.start_freq_khz;
start_freq2 = rule2->freq_range.start_freq_khz;
end_freq1 = rule1->freq_range.end_freq_khz;
end_freq2 = rule2->freq_range.end_freq_khz;
if ((start_freq1 >= start_freq2 &&
start_freq1 < end_freq2) ||
(start_freq2 > start_freq1 &&
start_freq2 < end_freq1))
return true;
/* TODO: Should we restrict intersection feasibility
* based on min bandwidth of the intersected region also,
* say the intersected rule should have a min bandwidth
* of 20 MHz?
*/
return false;
}
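/* Illustrative sketch (not part of the original file): for well-formed
* ranges (start < end), the overlap test above should be equivalent to
* the classic two-comparison form below; freq_ranges_overlap() is a
* hypothetical helper, not part of the driver.
*/
static inline bool freq_ranges_overlap(u32 start1, u32 end1,
u32 start2, u32 end2)
{
/* Two ranges overlap iff each one starts before the other one ends */
return start1 < end2 && start2 < end1;
}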
static void ath11k_reg_intersect_rules(struct ieee80211_reg_rule *rule1,
struct ieee80211_reg_rule *rule2,
struct ieee80211_reg_rule *new_rule)
{
u32 start_freq1, end_freq1;
u32 start_freq2, end_freq2;
u32 freq_diff, max_bw;
start_freq1 = rule1->freq_range.start_freq_khz;
start_freq2 = rule2->freq_range.start_freq_khz;
end_freq1 = rule1->freq_range.end_freq_khz;
end_freq2 = rule2->freq_range.end_freq_khz;
new_rule->freq_range.start_freq_khz = max_t(u32, start_freq1,
start_freq2);
new_rule->freq_range.end_freq_khz = min_t(u32, end_freq1, end_freq2);
freq_diff = new_rule->freq_range.end_freq_khz -
new_rule->freq_range.start_freq_khz;
max_bw = min_t(u32, rule1->freq_range.max_bandwidth_khz,
rule2->freq_range.max_bandwidth_khz);
new_rule->freq_range.max_bandwidth_khz = min_t(u32, max_bw, freq_diff);
new_rule->power_rule.max_antenna_gain =
min_t(u32, rule1->power_rule.max_antenna_gain,
rule2->power_rule.max_antenna_gain);
new_rule->power_rule.max_eirp = min_t(u32, rule1->power_rule.max_eirp,
rule2->power_rule.max_eirp);
/* Use the flags of both the rules */
new_rule->flags = rule1->flags | rule2->flags;
/* To be safe, let's use the max CAC timeout of both rules */
new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms,
rule2->dfs_cac_ms);
}
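/* Worked example (illustrative, not part of the original file; values in
* MHz for readability, the code above works in kHz): intersecting
* [5150..5350, max 80] with [5250..5850, max 160] yields [5250..5350],
* freq_diff = 100, max_bw = min(80, 160) = 80, so the new rule allows
* min(80, 100) = 80 MHz of bandwidth.
*/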
static struct ieee80211_regdomain *
ath11k_regd_intersect(struct ieee80211_regdomain *default_regd,
struct ieee80211_regdomain *curr_regd)
{
u8 num_old_regd_rules, num_curr_regd_rules, num_new_regd_rules;
struct ieee80211_reg_rule *old_rule, *curr_rule, *new_rule;
struct ieee80211_regdomain *new_regd = NULL;
u8 i, j, k;
num_old_regd_rules = default_regd->n_reg_rules;
num_curr_regd_rules = curr_regd->n_reg_rules;
num_new_regd_rules = 0;
/* Find the number of intersecting rules to allocate new regd memory */
for (i = 0; i < num_old_regd_rules; i++) {
old_rule = default_regd->reg_rules + i;
for (j = 0; j < num_curr_regd_rules; j++) {
curr_rule = curr_regd->reg_rules + j;
if (ath11k_reg_can_intersect(old_rule, curr_rule))
num_new_regd_rules++;
}
}
if (!num_new_regd_rules)
return NULL;
new_regd = kzalloc(sizeof(*new_regd) + (num_new_regd_rules *
sizeof(struct ieee80211_reg_rule)),
GFP_ATOMIC);
if (!new_regd)
return NULL;
/* We set the new country and DFS region directly and only trim
* the freq, power and antenna gain by intersecting with the
* default regdomain. Also, the maximum of the two DFS CAC timeouts
* is selected.
*/
new_regd->n_reg_rules = num_new_regd_rules;
memcpy(new_regd->alpha2, curr_regd->alpha2, sizeof(new_regd->alpha2));
new_regd->dfs_region = curr_regd->dfs_region;
new_rule = new_regd->reg_rules;
for (i = 0, k = 0; i < num_old_regd_rules; i++) {
old_rule = default_regd->reg_rules + i;
for (j = 0; j < num_curr_regd_rules; j++) {
curr_rule = curr_regd->reg_rules + j;
if (ath11k_reg_can_intersect(old_rule, curr_rule))
ath11k_reg_intersect_rules(old_rule, curr_rule,
(new_rule + k++));
}
}
return new_regd;
}
static const char *
ath11k_reg_get_regdom_str(enum nl80211_dfs_regions dfs_region)
{
switch (dfs_region) {
case NL80211_DFS_FCC:
return "FCC";
case NL80211_DFS_ETSI:
return "ETSI";
case NL80211_DFS_JP:
return "JP";
default:
return "UNSET";
}
}
static u16
ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
{
u16 bw;
if (end_freq <= start_freq)
return 0;
bw = end_freq - start_freq;
bw = min_t(u16, bw, max_bw);
if (bw >= 80 && bw < 160)
bw = 80;
else if (bw >= 40 && bw < 80)
bw = 40;
else if (bw >= 20 && bw < 40)
bw = 20;
else
bw = 0;
return bw;
}
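/* Worked example (illustrative, not part of the original file):
* ath11k_reg_adjust_bw(5590, 5650, 80) gives bw = min(60, 80) = 60, which
* snaps down to 40 since 40 <= 60 < 80; a sub-20 MHz leftover would return
* 0 and be dropped by the callers below.
*/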
static void
ath11k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq,
u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr,
u32 reg_flags)
{
reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq);
reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq);
reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw);
reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain);
reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr);
reg_rule->flags = reg_flags;
}
static void
ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
struct ieee80211_regdomain *regd,
struct cur_reg_rule *reg_rule,
u8 *rule_idx, u32 flags, u16 max_bw)
{
u32 start_freq;
u32 end_freq;
u16 bw;
u8 i;
i = *rule_idx;
/* there might be situations when even the input rule must be dropped */
i--;
/* frequencies below weather radar */
bw = ath11k_reg_adjust_bw(reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW, max_bw);
if (bw > 0) {
i++;
ath11k_reg_update_rule(regd->reg_rules + i,
reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW, bw,
reg_rule->ant_gain, reg_rule->reg_power,
flags);
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW, bw, reg_rule->ant_gain,
reg_rule->reg_power, regd->reg_rules[i].dfs_cac_ms,
flags);
}
/* weather radar frequencies */
start_freq = max_t(u32, reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW);
end_freq = min_t(u32, reg_rule->end_freq, ETSI_WEATHER_RADAR_BAND_HIGH);
bw = ath11k_reg_adjust_bw(start_freq, end_freq, max_bw);
if (bw > 0) {
i++;
ath11k_reg_update_rule(regd->reg_rules + i, start_freq,
end_freq, bw, reg_rule->ant_gain,
reg_rule->reg_power, flags);
regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, start_freq, end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
regd->reg_rules[i].dfs_cac_ms, flags);
}
/* frequencies above weather radar */
bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_HIGH,
reg_rule->end_freq, max_bw);
if (bw > 0) {
i++;
ath11k_reg_update_rule(regd->reg_rules + i,
ETSI_WEATHER_RADAR_BAND_HIGH,
reg_rule->end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
flags);
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, ETSI_WEATHER_RADAR_BAND_HIGH,
reg_rule->end_freq, bw, reg_rule->ant_gain,
reg_rule->reg_power, regd->reg_rules[i].dfs_cac_ms,
flags);
}
*rule_idx = i;
}
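/* Design note (illustrative, not part of the original file): the helper
* above splits one ETSI rule into up to three sub-rules: below, inside and
* above the 5590-5650 MHz weather radar band. Only the middle slice gets
* the extended ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT (600 s) CAC timeout;
* the outer slices keep the default.
*/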
struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
struct cur_regulatory_info *reg_info, bool intersect)
{
struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL;
struct cur_reg_rule *reg_rule;
u8 i = 0, j = 0, k = 0;
u8 num_rules;
u16 max_bw;
u32 flags;
char alpha2[3];
num_rules = reg_info->num_5ghz_reg_rules + reg_info->num_2ghz_reg_rules;
/* FIXME: Currently taking reg rules for 6 GHz only from Indoor AP mode list.
* This can be updated after complete 6 GHz regulatory support is added.
*/
if (reg_info->is_ext_reg_event)
num_rules += reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP];
if (!num_rules)
goto ret;
/* Add max additional rules to accommodate weather radar band */
if (reg_info->dfs_region == ATH11K_DFS_REG_ETSI)
num_rules += 2;
tmp_regd = kzalloc(sizeof(*tmp_regd) +
(num_rules * sizeof(struct ieee80211_reg_rule)),
GFP_ATOMIC);
if (!tmp_regd)
goto ret;
memcpy(tmp_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
memcpy(alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
alpha2[2] = '\0';
tmp_regd->dfs_region = ath11k_map_fw_dfs_region(reg_info->dfs_region);
ath11k_dbg(ab, ATH11K_DBG_REG,
"Country %s, CFG Regdomain %s FW Regdomain %d, num_reg_rules %d\n",
alpha2, ath11k_reg_get_regdom_str(tmp_regd->dfs_region),
reg_info->dfs_region, num_rules);
/* Update reg_rules[] below. Firmware is expected to
* send these rules in order (2 GHz rules first, then 5 GHz).
*/
for (; i < num_rules; i++) {
if (reg_info->num_2ghz_reg_rules &&
(i < reg_info->num_2ghz_reg_rules)) {
reg_rule = reg_info->reg_rules_2ghz_ptr + i;
max_bw = min_t(u16, reg_rule->max_bw,
reg_info->max_bw_2ghz);
flags = 0;
} else if (reg_info->num_5ghz_reg_rules &&
(j < reg_info->num_5ghz_reg_rules)) {
reg_rule = reg_info->reg_rules_5ghz_ptr + j++;
max_bw = min_t(u16, reg_rule->max_bw,
reg_info->max_bw_5ghz);
/* FW doesn't pass the NL80211_RRF_AUTO_BW flag for
* BW auto correction, so we can enable it by default
* for all 5 GHz rules here. The regulatory core performs
* BW correction if required and applies flags as
* per the other BW rule flags we pass from here.
*/
flags = NL80211_RRF_AUTO_BW;
} else if (reg_info->is_ext_reg_event &&
reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP] &&
(k < reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP])) {
reg_rule = reg_info->reg_rules_6ghz_ap_ptr[WMI_REG_INDOOR_AP] +
k++;
max_bw = min_t(u16, reg_rule->max_bw,
reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP]);
flags = NL80211_RRF_AUTO_BW;
} else {
break;
}
flags |= ath11k_map_fw_reg_flags(reg_rule->flags);
ath11k_reg_update_rule(tmp_regd->reg_rules + i,
reg_rule->start_freq,
reg_rule->end_freq, max_bw,
reg_rule->ant_gain, reg_rule->reg_power,
flags);
/* Update dfs cac timeout if the dfs domain is ETSI and the
* new rule covers weather radar band.
* Default value of '0' corresponds to 60s timeout, so no
* need to update that for other rules.
*/
if (flags & NL80211_RRF_DFS &&
reg_info->dfs_region == ATH11K_DFS_REG_ETSI &&
(reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_LOW &&
reg_rule->start_freq < ETSI_WEATHER_RADAR_BAND_HIGH)) {
ath11k_reg_update_weather_radar_band(ab, tmp_regd,
reg_rule, &i,
flags, max_bw);
continue;
}
if (reg_info->is_ext_reg_event) {
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d) (%d, %d)\n",
i + 1, reg_rule->start_freq, reg_rule->end_freq,
max_bw, reg_rule->ant_gain, reg_rule->reg_power,
tmp_regd->reg_rules[i].dfs_cac_ms, flags,
reg_rule->psd_flag, reg_rule->psd_eirp);
} else {
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, reg_rule->start_freq, reg_rule->end_freq,
max_bw, reg_rule->ant_gain, reg_rule->reg_power,
tmp_regd->reg_rules[i].dfs_cac_ms,
flags);
}
}
tmp_regd->n_reg_rules = i;
if (intersect) {
default_regd = ab->default_regd[reg_info->phy_id];
/* Get a new regd by intersecting the received regd with
* our default regd.
*/
new_regd = ath11k_regd_intersect(default_regd, tmp_regd);
kfree(tmp_regd);
if (!new_regd) {
ath11k_warn(ab, "Unable to create intersected regdomain\n");
goto ret;
}
} else {
new_regd = tmp_regd;
}
ret:
return new_regd;
}
void ath11k_regd_update_work(struct work_struct *work)
{
struct ath11k *ar = container_of(work, struct ath11k,
regd_update_work);
int ret;
ret = ath11k_regd_update(ar);
if (ret) {
/* Firmware has already moved to the new regd. We need
* to maintain channel consistency across FW, Host driver
* and userspace. Hence as a fallback mechanism we can set
* the prev or default country code to the firmware.
*/
/* TODO: Implement Fallback Mechanism */
}
}
void ath11k_reg_init(struct ath11k *ar)
{
ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
ar->hw->wiphy->reg_notifier = ath11k_reg_notifier;
}
void ath11k_reg_free(struct ath11k_base *ab)
{
int i;
for (i = 0; i < ab->hw_params.max_radios; i++) {
kfree(ab->default_regd[i]);
kfree(ab->new_regd[i]);
}
}
|
linux-master
|
drivers/net/wireless/ath/ath11k/reg.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/remoteproc.h>
#include <linux/firmware.h>
#include <linux/of.h>
#include "core.h"
#include "dp_tx.h"
#include "dp_rx.h"
#include "debug.h"
#include "hif.h"
#include "wow.h"
unsigned int ath11k_debug_mask;
EXPORT_SYMBOL(ath11k_debug_mask);
module_param_named(debug_mask, ath11k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
static unsigned int ath11k_crypto_mode;
module_param_named(crypto_mode, ath11k_crypto_mode, uint, 0644);
MODULE_PARM_DESC(crypto_mode, "crypto mode: 0-hardware, 1-software");
/* frame mode values are mapped as per enum ath11k_hw_txrx_mode */
unsigned int ath11k_frame_mode = ATH11K_HW_TXRX_NATIVE_WIFI;
module_param_named(frame_mode, ath11k_frame_mode, uint, 0644);
MODULE_PARM_DESC(frame_mode,
"Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)");
bool ath11k_ftm_mode;
module_param_named(ftm_mode, ath11k_ftm_mode, bool, 0444);
MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");
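/* Usage example (illustrative, not part of the original file): the module
* parameters above can be set at load time, e.g.
*
*	modprobe ath11k frame_mode=2 debug_mask=0xffffffff
*
* and the 0644 ones can also be changed at runtime through
* /sys/module/ath11k/parameters/.
*/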
static const struct ath11k_hw_params ath11k_hw_params[] = {
{
.hw_rev = ATH11K_HW_IPQ8074,
.name = "ipq8074 hw2.0",
.fw = {
.dir = "IPQ8074/hw2.0",
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
},
.max_radios = 3,
.bdf_addr = 0x4B0C0000,
.hw_ops = &ipq8074_ops,
.ring_mask = &ath11k_hw_ring_mask_ipq8074,
.internal_sleep_clock = false,
.regs = &ipq8074_regs,
.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
.host_ce_config = ath11k_host_ce_config_ipq8074,
.ce_count = 12,
.target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
.target_ce_count = 11,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074,
.svc_to_ce_map_len = 21,
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = false,
.rxdma1_enable = true,
.num_rxmda_per_pdev = 1,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
.spectral = {
.fft_sz = 2,
/* HW bug: the expected bin size is 2 bytes, but the HW reports it as
* 4 bytes, so a pad size of 2 bytes is added to compensate.
*/
.fft_pad_sz = 2,
.summary_pad_sz = 0,
.fft_hdr_len = 16,
.max_fft_bins = 512,
.fragment_160mhz = true,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT),
.supports_monitor = true,
.full_monitor_mode = false,
.supports_shadow_regs = false,
.idle_ps = false,
.supports_sta_ps = false,
.coldboot_cal_mm = true,
.coldboot_cal_ftm = true,
.cbcal_restart_fw = true,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
.supports_regdb = false,
.fix_l1ss = true,
.credit_flow = false,
.max_tx_ring = DP_TCL_NUM_RING_MAX,
.hal_params = &ath11k_hw_hal_params_ipq8074,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = true,
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
.current_cc_support = false,
.dbr_debug_support = true,
.global_reset = false,
.bios_sar_capa = NULL,
.m3_fw_support = false,
.fixed_bdf_addr = true,
.fixed_mem_region = true,
.static_window_map = false,
.hybrid_bus_type = false,
.fixed_fw_mem = false,
.support_off_channel_tx = false,
.supports_multi_bssid = false,
.sram_dump = {},
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
.name = "ipq6018 hw1.0",
.fw = {
.dir = "IPQ6018/hw1.0",
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
},
.max_radios = 2,
.bdf_addr = 0x4ABC0000,
.hw_ops = &ipq6018_ops,
.ring_mask = &ath11k_hw_ring_mask_ipq8074,
.internal_sleep_clock = false,
.regs = &ipq8074_regs,
.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
.host_ce_config = ath11k_host_ce_config_ipq8074,
.ce_count = 12,
.target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
.target_ce_count = 11,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018,
.svc_to_ce_map_len = 19,
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = false,
.rxdma1_enable = true,
.num_rxmda_per_pdev = 1,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
.spectral = {
.fft_sz = 4,
.fft_pad_sz = 0,
.summary_pad_sz = 0,
.fft_hdr_len = 16,
.max_fft_bins = 512,
.fragment_160mhz = true,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT),
.supports_monitor = true,
.full_monitor_mode = false,
.supports_shadow_regs = false,
.idle_ps = false,
.supports_sta_ps = false,
.coldboot_cal_mm = true,
.coldboot_cal_ftm = true,
.cbcal_restart_fw = true,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
.supports_regdb = false,
.fix_l1ss = true,
.credit_flow = false,
.max_tx_ring = DP_TCL_NUM_RING_MAX,
.hal_params = &ath11k_hw_hal_params_ipq8074,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = true,
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
.current_cc_support = false,
.dbr_debug_support = true,
.global_reset = false,
.bios_sar_capa = NULL,
.m3_fw_support = false,
.fixed_bdf_addr = true,
.fixed_mem_region = true,
.static_window_map = false,
.hybrid_bus_type = false,
.fixed_fw_mem = false,
.support_off_channel_tx = false,
.supports_multi_bssid = false,
.sram_dump = {},
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
},
{
.name = "qca6390 hw2.0",
.hw_rev = ATH11K_HW_QCA6390_HW20,
.fw = {
.dir = "QCA6390/hw2.0",
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
},
.max_radios = 3,
.bdf_addr = 0x4B0C0000,
.hw_ops = &qca6390_ops,
.ring_mask = &ath11k_hw_ring_mask_qca6390,
.internal_sleep_clock = true,
.regs = &qca6390_regs,
.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
.host_ce_config = ath11k_host_ce_config_qca6390,
.ce_count = 9,
.target_ce_config = ath11k_target_ce_config_wlan_qca6390,
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 2,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
.spectral = {
.fft_sz = 0,
.fft_pad_sz = 0,
.summary_pad_sz = 0,
.fft_hdr_len = 0,
.max_fft_bins = 0,
.fragment_160mhz = false,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
.supports_monitor = false,
.full_monitor_mode = false,
.supports_shadow_regs = true,
.idle_ps = true,
.supports_sta_ps = true,
.coldboot_cal_mm = false,
.coldboot_cal_ftm = false,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
.supports_regdb = false,
.fix_l1ss = true,
.credit_flow = true,
.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
.hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
.current_cc_support = true,
.dbr_debug_support = false,
.global_reset = true,
.bios_sar_capa = NULL,
.m3_fw_support = true,
.fixed_bdf_addr = false,
.fixed_mem_region = false,
.static_window_map = false,
.hybrid_bus_type = false,
.fixed_fw_mem = false,
.support_off_channel_tx = true,
.supports_multi_bssid = true,
.sram_dump = {
.start = 0x01400000,
.end = 0x0171ffff,
},
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
},
{
.name = "qcn9074 hw1.0",
.hw_rev = ATH11K_HW_QCN9074_HW10,
.fw = {
.dir = "QCN9074/hw1.0",
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
},
.max_radios = 1,
.single_pdev_only = false,
.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074,
.hw_ops = &qcn9074_ops,
.ring_mask = &ath11k_hw_ring_mask_qcn9074,
.internal_sleep_clock = false,
.regs = &qcn9074_regs,
.host_ce_config = ath11k_host_ce_config_qcn9074,
.ce_count = 6,
.target_ce_config = ath11k_target_ce_config_wlan_qcn9074,
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qcn9074,
.svc_to_ce_map_len = 18,
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.rxdma1_enable = true,
.num_rxmda_per_pdev = 1,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
.spectral = {
.fft_sz = 2,
.fft_pad_sz = 0,
.summary_pad_sz = 16,
.fft_hdr_len = 24,
.max_fft_bins = 1024,
.fragment_160mhz = false,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT),
.supports_monitor = true,
.full_monitor_mode = true,
.supports_shadow_regs = false,
.idle_ps = false,
.supports_sta_ps = false,
.coldboot_cal_mm = false,
.coldboot_cal_ftm = true,
.cbcal_restart_fw = true,
.fw_mem_mode = 2,
.num_vdevs = 8,
.num_peers = 128,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
.supports_regdb = false,
.fix_l1ss = true,
.credit_flow = false,
.max_tx_ring = DP_TCL_NUM_RING_MAX,
.hal_params = &ath11k_hw_hal_params_ipq8074,
.supports_dynamic_smps_6ghz = true,
.alloc_cacheable_memory = true,
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
.current_cc_support = false,
.dbr_debug_support = true,
.global_reset = false,
.bios_sar_capa = NULL,
.m3_fw_support = true,
.fixed_bdf_addr = false,
.fixed_mem_region = false,
.static_window_map = true,
.hybrid_bus_type = false,
.fixed_fw_mem = false,
.support_off_channel_tx = false,
.supports_multi_bssid = false,
.sram_dump = {},
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
},
{
.name = "wcn6855 hw2.0",
.hw_rev = ATH11K_HW_WCN6855_HW20,
.fw = {
.dir = "WCN6855/hw2.0",
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
},
.max_radios = 3,
.bdf_addr = 0x4B0C0000,
.hw_ops = &wcn6855_ops,
.ring_mask = &ath11k_hw_ring_mask_qca6390,
.internal_sleep_clock = true,
.regs = &wcn6855_regs,
.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
.host_ce_config = ath11k_host_ce_config_qca6390,
.ce_count = 9,
.target_ce_config = ath11k_target_ce_config_wlan_qca6390,
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 2,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
.spectral = {
.fft_sz = 0,
.fft_pad_sz = 0,
.summary_pad_sz = 0,
.fft_hdr_len = 0,
.max_fft_bins = 0,
.fragment_160mhz = false,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
.supports_monitor = false,
.full_monitor_mode = false,
.supports_shadow_regs = true,
.idle_ps = true,
.supports_sta_ps = true,
.coldboot_cal_mm = false,
.coldboot_cal_ftm = false,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
.supports_regdb = true,
.fix_l1ss = false,
.credit_flow = true,
.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
.hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
.current_cc_support = true,
.dbr_debug_support = false,
.global_reset = true,
.bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
.m3_fw_support = true,
.fixed_bdf_addr = false,
.fixed_mem_region = false,
.static_window_map = false,
.hybrid_bus_type = false,
.fixed_fw_mem = false,
.support_off_channel_tx = true,
.supports_multi_bssid = true,
.sram_dump = {
.start = 0x01400000,
.end = 0x0177ffff,
},
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
},
{
.name = "wcn6855 hw2.1",
.hw_rev = ATH11K_HW_WCN6855_HW21,
.fw = {
.dir = "WCN6855/hw2.1",
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
},
.max_radios = 3,
.bdf_addr = 0x4B0C0000,
.hw_ops = &wcn6855_ops,
.ring_mask = &ath11k_hw_ring_mask_qca6390,
.internal_sleep_clock = true,
.regs = &wcn6855_regs,
.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
.host_ce_config = ath11k_host_ce_config_qca6390,
.ce_count = 9,
.target_ce_config = ath11k_target_ce_config_wlan_qca6390,
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 2,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
.spectral = {
.fft_sz = 0,
.fft_pad_sz = 0,
.summary_pad_sz = 0,
.fft_hdr_len = 0,
.max_fft_bins = 0,
.fragment_160mhz = false,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
.supports_monitor = false,
.supports_shadow_regs = true,
.idle_ps = true,
.supports_sta_ps = true,
.coldboot_cal_mm = false,
.coldboot_cal_ftm = false,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
.supports_regdb = true,
.fix_l1ss = false,
.credit_flow = true,
.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
.hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
.current_cc_support = true,
.dbr_debug_support = false,
.global_reset = true,
.bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
.m3_fw_support = true,
.fixed_bdf_addr = false,
.fixed_mem_region = false,
.static_window_map = false,
.hybrid_bus_type = false,
.fixed_fw_mem = false,
.support_off_channel_tx = true,
.supports_multi_bssid = true,
.sram_dump = {
.start = 0x01400000,
.end = 0x0177ffff,
},
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
},
{
.name = "wcn6750 hw1.0",
.hw_rev = ATH11K_HW_WCN6750_HW10,
.fw = {
.dir = "WCN6750/hw1.0",
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
},
.max_radios = 1,
.bdf_addr = 0x4B0C0000,
.hw_ops = &wcn6750_ops,
.ring_mask = &ath11k_hw_ring_mask_wcn6750,
.internal_sleep_clock = false,
.regs = &wcn6750_regs,
.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750,
.host_ce_config = ath11k_host_ce_config_qca6390,
.ce_count = 9,
.target_ce_config = ath11k_target_ce_config_wlan_qca6390,
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 1,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
.spectral = {
.fft_sz = 0,
.fft_pad_sz = 0,
.summary_pad_sz = 0,
.fft_hdr_len = 0,
.max_fft_bins = 0,
.fragment_160mhz = false,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
.supports_monitor = false,
.supports_shadow_regs = true,
.idle_ps = true,
.supports_sta_ps = true,
.coldboot_cal_mm = true,
.coldboot_cal_ftm = true,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
.supports_regdb = true,
.fix_l1ss = false,
.credit_flow = true,
.max_tx_ring = DP_TCL_NUM_RING_MAX,
.hal_params = &ath11k_hw_hal_params_wcn6750,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
.supports_rssi_stats = true,
.fw_wmi_diag_event = false,
.current_cc_support = true,
.dbr_debug_support = false,
.global_reset = false,
.bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
.m3_fw_support = false,
.fixed_bdf_addr = false,
.fixed_mem_region = false,
.static_window_map = true,
.hybrid_bus_type = true,
.fixed_fw_mem = true,
.support_off_channel_tx = true,
.supports_multi_bssid = true,
.sram_dump = {},
.tcl_ring_retry = false,
.tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750,
.smp2p_wow_exit = true,
.support_fw_mac_sequence = true,
},
{
.hw_rev = ATH11K_HW_IPQ5018_HW10,
.name = "ipq5018 hw1.0",
.fw = {
.dir = "IPQ5018/hw1.0",
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
},
.max_radios = MAX_RADIOS_5018,
.bdf_addr = 0x4BA00000,
/* hal_desc_sz and hw ops are similar to qcn9074 */
.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
.ring_mask = &ath11k_hw_ring_mask_ipq8074,
.credit_flow = false,
.max_tx_ring = 1,
.spectral = {
.fft_sz = 2,
.fft_pad_sz = 0,
.summary_pad_sz = 16,
.fft_hdr_len = 24,
.max_fft_bins = 1024,
},
.internal_sleep_clock = false,
.regs = &ipq5018_regs,
.hw_ops = &ipq5018_ops,
.host_ce_config = ath11k_host_ce_config_qcn9074,
.ce_count = CE_CNT_5018,
.target_ce_config = ath11k_target_ce_config_wlan_ipq5018,
.target_ce_count = TARGET_CE_CNT_5018,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq5018,
.svc_to_ce_map_len = SVC_CE_MAP_LEN_5018,
.ce_ie_addr = &ath11k_ce_ie_addr_ipq5018,
.ce_remap = &ath11k_ce_remap_ipq5018,
.rxdma1_enable = true,
.num_rxmda_per_pdev = RXDMA_PER_PDEV_5018,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT),
.supports_monitor = false,
.supports_sta_ps = false,
.supports_shadow_regs = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
.supports_regdb = false,
.idle_ps = false,
.supports_suspend = false,
.hal_params = &ath11k_hw_hal_params_ipq8074,
.single_pdev_only = false,
.coldboot_cal_mm = true,
.coldboot_cal_ftm = true,
.cbcal_restart_fw = true,
.fix_l1ss = true,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = true,
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
.current_cc_support = false,
.dbr_debug_support = true,
.global_reset = false,
.bios_sar_capa = NULL,
.m3_fw_support = false,
.fixed_bdf_addr = true,
.fixed_mem_region = true,
.static_window_map = false,
.hybrid_bus_type = false,
.fixed_fw_mem = false,
.support_off_channel_tx = false,
.supports_multi_bssid = false,
.sram_dump = {},
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
},
};
static inline struct ath11k_pdev *ath11k_core_get_single_pdev(struct ath11k_base *ab)
{
WARN_ON(!ab->hw_params.single_pdev_only);
return &ab->pdevs[0];
}
void ath11k_fw_stats_pdevs_free(struct list_head *head)
{
struct ath11k_fw_stats_pdev *i, *tmp;
list_for_each_entry_safe(i, tmp, head, list) {
list_del(&i->list);
kfree(i);
}
}
void ath11k_fw_stats_vdevs_free(struct list_head *head)
{
struct ath11k_fw_stats_vdev *i, *tmp;
list_for_each_entry_safe(i, tmp, head, list) {
list_del(&i->list);
kfree(i);
}
}
void ath11k_fw_stats_bcn_free(struct list_head *head)
{
struct ath11k_fw_stats_bcn *i, *tmp;
list_for_each_entry_safe(i, tmp, head, list) {
list_del(&i->list);
kfree(i);
}
}
void ath11k_fw_stats_init(struct ath11k *ar)
{
INIT_LIST_HEAD(&ar->fw_stats.pdevs);
INIT_LIST_HEAD(&ar->fw_stats.vdevs);
INIT_LIST_HEAD(&ar->fw_stats.bcn);
init_completion(&ar->fw_stats_complete);
}
void ath11k_fw_stats_free(struct ath11k_fw_stats *stats)
{
ath11k_fw_stats_pdevs_free(&stats->pdevs);
ath11k_fw_stats_vdevs_free(&stats->vdevs);
ath11k_fw_stats_bcn_free(&stats->bcn);
}
bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab)
{
if (!ath11k_cold_boot_cal)
return false;
if (ath11k_ftm_mode)
return ab->hw_params.coldboot_cal_ftm;
else
return ab->hw_params.coldboot_cal_mm;
}
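/* Note on the suspend path below: stop pktlog, drain pending tx, arm
 * WoW in firmware, stop the CE/DP shadow timers, mask all interrupts
 * and finally suspend the HIF. ath11k_core_resume() undoes these steps
 * in roughly reverse order.
 */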
int ath11k_core_suspend(struct ath11k_base *ab)
{
int ret;
struct ath11k_pdev *pdev;
struct ath11k *ar;
if (!ab->hw_params.supports_suspend)
return -EOPNOTSUPP;
/* so far single_pdev_only chips have supports_suspend as true
* and only the first pdev is valid.
*/
pdev = ath11k_core_get_single_pdev(ab);
ar = pdev->ar;
if (!ar || ar->state != ATH11K_STATE_OFF)
return 0;
ret = ath11k_dp_rx_pktlog_stop(ab, true);
if (ret) {
ath11k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n",
ret);
return ret;
}
ret = ath11k_mac_wait_tx_complete(ar);
if (ret) {
ath11k_warn(ab, "failed to wait tx complete: %d\n", ret);
return ret;
}
ret = ath11k_wow_enable(ab);
if (ret) {
ath11k_warn(ab, "failed to enable wow during suspend: %d\n", ret);
return ret;
}
ret = ath11k_dp_rx_pktlog_stop(ab, false);
if (ret) {
ath11k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n",
ret);
return ret;
}
ath11k_ce_stop_shadow_timers(ab);
ath11k_dp_stop_shadow_timers(ab);
ath11k_hif_irq_disable(ab);
ath11k_hif_ce_irq_disable(ab);
ret = ath11k_hif_suspend(ab);
if (ret) {
ath11k_warn(ab, "failed to suspend hif: %d\n", ret);
return ret;
}
return 0;
}
EXPORT_SYMBOL(ath11k_core_suspend);
int ath11k_core_resume(struct ath11k_base *ab)
{
int ret;
struct ath11k_pdev *pdev;
struct ath11k *ar;
if (!ab->hw_params.supports_suspend)
return -EOPNOTSUPP;
/* so far single_pdev_only chips have supports_suspend as true
* and only the first pdev is valid.
*/
pdev = ath11k_core_get_single_pdev(ab);
ar = pdev->ar;
if (!ar || ar->state != ATH11K_STATE_OFF)
return 0;
ret = ath11k_hif_resume(ab);
if (ret) {
ath11k_warn(ab, "failed to resume hif during resume: %d\n", ret);
return ret;
}
ath11k_hif_ce_irq_enable(ab);
ath11k_hif_irq_enable(ab);
ret = ath11k_dp_rx_pktlog_start(ab);
if (ret) {
ath11k_warn(ab, "failed to start rx pktlog during resume: %d\n",
ret);
return ret;
}
ret = ath11k_wow_wakeup(ab);
if (ret) {
ath11k_warn(ab, "failed to wakeup wow during resume: %d\n", ret);
return ret;
}
return 0;
}
EXPORT_SYMBOL(ath11k_core_resume);
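/* SMBIOS walk callback: a vendor BIOS may expose a table entry carrying
 * a regulatory country code and an optional board-data-file (BDF)
 * variant string prefixed with ATH11K_SMBIOS_BDF_EXT_MAGIC. The
 * callback validates that entry and copies the variant, without the
 * magic prefix, into ab->qmi.target.bdf_ext.
 */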
static void ath11k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
{
struct ath11k_base *ab = data;
const char *magic = ATH11K_SMBIOS_BDF_EXT_MAGIC;
struct ath11k_smbios_bdf *smbios = (struct ath11k_smbios_bdf *)hdr;
ssize_t copied;
size_t len;
int i;
if (ab->qmi.target.bdf_ext[0] != '\0')
return;
if (hdr->type != ATH11K_SMBIOS_BDF_EXT_TYPE)
return;
if (hdr->length != ATH11K_SMBIOS_BDF_EXT_LENGTH) {
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"wrong smbios bdf ext type length (%d).\n",
hdr->length);
return;
}
spin_lock_bh(&ab->base_lock);
switch (smbios->country_code_flag) {
case ATH11K_SMBIOS_CC_ISO:
ab->new_alpha2[0] = (smbios->cc_code >> 8) & 0xff;
ab->new_alpha2[1] = smbios->cc_code & 0xff;
ath11k_dbg(ab, ATH11K_DBG_BOOT, "smbios cc_code %c%c\n",
ab->new_alpha2[0], ab->new_alpha2[1]);
break;
case ATH11K_SMBIOS_CC_WW:
ab->new_alpha2[0] = '0';
ab->new_alpha2[1] = '0';
ath11k_dbg(ab, ATH11K_DBG_BOOT, "smbios worldwide regdomain\n");
break;
default:
ath11k_dbg(ab, ATH11K_DBG_BOOT, "ignore smbios country code setting %d\n",
smbios->country_code_flag);
break;
}
spin_unlock_bh(&ab->base_lock);
if (!smbios->bdf_enabled) {
ath11k_dbg(ab, ATH11K_DBG_BOOT, "bdf variant name not found.\n");
return;
}
/* Only one string exists (per spec) */
if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"bdf variant magic does not match.\n");
return;
}
len = min_t(size_t,
strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
for (i = 0; i < len; i++) {
if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"bdf variant name contains non ascii chars.\n");
return;
}
}
/* Copy extension name without magic prefix */
copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
sizeof(ab->qmi.target.bdf_ext));
if (copied < 0) {
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"bdf variant string is longer than the buffer can accommodate\n");
return;
}
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"found and validated bdf variant smbios_type 0x%x bdf %s\n",
ATH11K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
}
int ath11k_core_check_smbios(struct ath11k_base *ab)
{
ab->qmi.target.bdf_ext[0] = '\0';
dmi_walk(ath11k_core_check_cc_code_bdfext, ab);
if (ab->qmi.target.bdf_ext[0] == '\0')
return -ENODATA;
return 0;
}
int ath11k_core_check_dt(struct ath11k_base *ab)
{
size_t max_len = sizeof(ab->qmi.target.bdf_ext);
const char *variant = NULL;
struct device_node *node;
node = ab->dev->of_node;
if (!node)
return -ENOENT;
of_property_read_string(node, "qcom,ath11k-calibration-variant",
&variant);
if (!variant)
return -ENODATA;
if (strscpy(ab->qmi.target.bdf_ext, variant, max_len) < 0)
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"bdf variant string is longer than the buffer can accommodate (variant: %s)\n",
variant);
return 0;
}
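/* Illustrative examples only, with hypothetical field values: in
 * ATH11K_BDF_SEARCH_BUS_AND_BOARD mode this builds a name such as
 *   "bus=pci,vendor=17cb,device=1103,subsystem-vendor=17aa,
 *    subsystem-device=0108,qmi-chip-id=2,qmi-board-id=255,variant=foo"
 * and otherwise the shorter
 *   "bus=ahb,qmi-chip-id=0,qmi-board-id=255"
 * which is then matched against the names stored in board-2.bin.
 */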
static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
size_t name_len, bool with_variant,
bool bus_type_mode)
{
/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
scnprintf(variant, sizeof(variant), ",variant=%s",
ab->qmi.target.bdf_ext);
switch (ab->id.bdf_search) {
case ATH11K_BDF_SEARCH_BUS_AND_BOARD:
if (bus_type_mode)
scnprintf(name, name_len,
"bus=%s",
ath11k_bus_str(ab->hif.bus));
else
scnprintf(name, name_len,
"bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
ath11k_bus_str(ab->hif.bus),
ab->id.vendor, ab->id.device,
ab->id.subsystem_vendor,
ab->id.subsystem_device,
ab->qmi.target.chip_id,
ab->qmi.target.board_id,
variant);
break;
default:
scnprintf(name, name_len,
"bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
ath11k_bus_str(ab->hif.bus),
ab->qmi.target.chip_id,
ab->qmi.target.board_id, variant);
break;
}
ath11k_dbg(ab, ATH11K_DBG_BOOT, "using board name '%s'\n", name);
return 0;
}
static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
size_t name_len)
{
return __ath11k_core_create_board_name(ab, name, name_len, true, false);
}
static int ath11k_core_create_fallback_board_name(struct ath11k_base *ab, char *name,
size_t name_len)
{
return __ath11k_core_create_board_name(ab, name, name_len, false, false);
}
static int ath11k_core_create_bus_type_board_name(struct ath11k_base *ab, char *name,
size_t name_len)
{
return __ath11k_core_create_board_name(ab, name, name_len, false, true);
}
const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab,
const char *file)
{
const struct firmware *fw;
char path[100];
int ret;
if (file == NULL)
return ERR_PTR(-ENOENT);
ath11k_core_create_firmware_path(ab, file, path, sizeof(path));
ret = firmware_request_nowarn(&fw, path, ab->dev);
if (ret)
return ERR_PTR(ret);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "firmware request %s size %zu\n",
path, fw->size);
return fw;
}
void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
{
if (!IS_ERR(bd->fw))
release_firmware(bd->fw);
memset(bd, 0, sizeof(*bd));
}
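/* board-2.bin layout, as consumed by the parsers below: a null-padded
 * magic string followed by a sequence of 4-byte-aligned TLV elements
 * (struct ath11k_fw_ie: le32 id, le32 len, data[]). A BOARD/REGDB
 * container IE in turn holds NAME and DATA sub-IEs; a DATA element
 * applies to the most recently matched NAME element.
 */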
static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab,
struct ath11k_board_data *bd,
const void *buf, size_t buf_len,
const char *boardname,
int ie_id,
int name_id,
int data_id)
{
const struct ath11k_fw_ie *hdr;
bool name_match_found;
int ret, board_ie_id;
size_t board_ie_len;
const void *board_ie_data;
name_match_found = false;
/* go through ATH11K_BD_IE_BOARD_/ATH11K_BD_IE_REGDB_ elements */
while (buf_len > sizeof(struct ath11k_fw_ie)) {
hdr = buf;
board_ie_id = le32_to_cpu(hdr->id);
board_ie_len = le32_to_cpu(hdr->len);
board_ie_data = hdr->data;
buf_len -= sizeof(*hdr);
buf += sizeof(*hdr);
if (buf_len < ALIGN(board_ie_len, 4)) {
ath11k_err(ab, "invalid %s length: %zu < %zu\n",
ath11k_bd_ie_type_str(ie_id),
buf_len, ALIGN(board_ie_len, 4));
ret = -EINVAL;
goto out;
}
if (board_ie_id == name_id) {
ath11k_dbg_dump(ab, ATH11K_DBG_BOOT, "board name", "",
board_ie_data, board_ie_len);
if (board_ie_len != strlen(boardname))
goto next;
ret = memcmp(board_ie_data, boardname, strlen(boardname));
if (ret)
goto next;
name_match_found = true;
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"found match %s for name '%s'",
ath11k_bd_ie_type_str(ie_id),
boardname);
} else if (board_ie_id == data_id) {
if (!name_match_found)
/* no match found */
goto next;
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"found %s for '%s'",
ath11k_bd_ie_type_str(ie_id),
boardname);
bd->data = board_ie_data;
bd->len = board_ie_len;
ret = 0;
goto out;
} else {
ath11k_warn(ab, "unknown %s id found: %d\n",
ath11k_bd_ie_type_str(ie_id),
board_ie_id);
}
next:
/* jump over the padding */
board_ie_len = ALIGN(board_ie_len, 4);
buf_len -= board_ie_len;
buf += board_ie_len;
}
/* no match found */
ret = -ENOENT;
out:
return ret;
}
static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
struct ath11k_board_data *bd,
const char *boardname,
int ie_id_match,
int name_id,
int data_id)
{
size_t len, magic_len;
const u8 *data;
char *filename, filepath[100];
size_t ie_len;
struct ath11k_fw_ie *hdr;
int ret, ie_id;
filename = ATH11K_BOARD_API2_FILE;
if (!bd->fw)
bd->fw = ath11k_core_firmware_request(ab, filename);
if (IS_ERR(bd->fw))
return PTR_ERR(bd->fw);
data = bd->fw->data;
len = bd->fw->size;
ath11k_core_create_firmware_path(ab, filename,
filepath, sizeof(filepath));
/* magic is padded with an extra null byte */
magic_len = strlen(ATH11K_BOARD_MAGIC) + 1;
if (len < magic_len) {
ath11k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
filepath, len);
ret = -EINVAL;
goto err;
}
if (memcmp(data, ATH11K_BOARD_MAGIC, magic_len)) {
ath11k_err(ab, "found invalid board magic\n");
ret = -EINVAL;
goto err;
}
/* magic is padded to 4 bytes */
magic_len = ALIGN(magic_len, 4);
if (len < magic_len) {
ath11k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
filepath, len);
ret = -EINVAL;
goto err;
}
data += magic_len;
len -= magic_len;
while (len > sizeof(struct ath11k_fw_ie)) {
hdr = (struct ath11k_fw_ie *)data;
ie_id = le32_to_cpu(hdr->id);
ie_len = le32_to_cpu(hdr->len);
len -= sizeof(*hdr);
data = hdr->data;
if (len < ALIGN(ie_len, 4)) {
ath11k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
ie_id, ie_len, len);
ret = -EINVAL;
goto err;
}
if (ie_id == ie_id_match) {
ret = ath11k_core_parse_bd_ie_board(ab, bd, data,
ie_len,
boardname,
ie_id_match,
name_id,
data_id);
if (ret == -ENOENT)
/* no match found, continue */
goto next;
else if (ret)
/* there was an error, bail out */
goto err;
/* either found or error, so stop searching */
goto out;
}
next:
/* jump over the padding */
ie_len = ALIGN(ie_len, 4);
len -= ie_len;
data += ie_len;
}
out:
if (!bd->data || !bd->len) {
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"failed to fetch %s for %s from %s\n",
ath11k_bd_ie_type_str(ie_id_match),
boardname, filepath);
ret = -ENODATA;
goto err;
}
return 0;
err:
ath11k_core_free_bdf(ab, bd);
return ret;
}
int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
struct ath11k_board_data *bd,
const char *name)
{
bd->fw = ath11k_core_firmware_request(ab, name);
if (IS_ERR(bd->fw))
return PTR_ERR(bd->fw);
bd->data = bd->fw->data;
bd->len = bd->fw->size;
return 0;
}
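/* BDF lookup order: first the fully qualified board name in the API 2
 * file (ATH11K_BOARD_API2_FILE), then the fallback name without the
 * variant suffix, and finally the plain API 1 file
 * (ATH11K_DEFAULT_BOARD_FILE).
 */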
#define BOARD_NAME_SIZE 200
int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
{
char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
char *filename, filepath[100];
int ret;
filename = ATH11K_BOARD_API2_FILE;
ret = ath11k_core_create_board_name(ab, boardname, sizeof(boardname));
if (ret) {
ath11k_err(ab, "failed to create board name: %d", ret);
return ret;
}
ab->bd_api = 2;
ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname,
ATH11K_BD_IE_BOARD,
ATH11K_BD_IE_BOARD_NAME,
ATH11K_BD_IE_BOARD_DATA);
if (!ret)
goto success;
ret = ath11k_core_create_fallback_board_name(ab, fallback_boardname,
sizeof(fallback_boardname));
if (ret) {
ath11k_err(ab, "failed to create fallback board name: %d", ret);
return ret;
}
ret = ath11k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
ATH11K_BD_IE_BOARD,
ATH11K_BD_IE_BOARD_NAME,
ATH11K_BD_IE_BOARD_DATA);
if (!ret)
goto success;
ab->bd_api = 1;
ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_DEFAULT_BOARD_FILE);
if (ret) {
ath11k_core_create_firmware_path(ab, filename,
filepath, sizeof(filepath));
ath11k_err(ab, "failed to fetch board data for %s from %s\n",
boardname, filepath);
if (memcmp(boardname, fallback_boardname, strlen(boardname)))
ath11k_err(ab, "failed to fetch board data for %s from %s\n",
fallback_boardname, filepath);
ath11k_err(ab, "failed to fetch board.bin from %s\n",
ab->hw_params.fw.dir);
return ret;
}
success:
ath11k_dbg(ab, ATH11K_DBG_BOOT, "using board api %d\n", ab->bd_api);
return 0;
}
int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd)
{
char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
int ret;
ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
if (ret) {
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"failed to create board name for regdb: %d", ret);
goto exit;
}
ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname,
ATH11K_BD_IE_REGDB,
ATH11K_BD_IE_REGDB_NAME,
ATH11K_BD_IE_REGDB_DATA);
if (!ret)
goto exit;
ret = ath11k_core_create_bus_type_board_name(ab, default_boardname,
BOARD_NAME_SIZE);
if (ret) {
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"failed to create default board name for regdb: %d", ret);
goto exit;
}
ret = ath11k_core_fetch_board_data_api_n(ab, bd, default_boardname,
ATH11K_BD_IE_REGDB,
ATH11K_BD_IE_REGDB_NAME,
ATH11K_BD_IE_REGDB_DATA);
if (!ret)
goto exit;
ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_REGDB_FILE_NAME);
if (ret)
ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to fetch %s from %s\n",
ATH11K_REGDB_FILE_NAME, ab->hw_params.fw.dir);
exit:
if (!ret)
ath11k_dbg(ab, ATH11K_DBG_BOOT, "fetched regdb\n");
return ret;
}
static void ath11k_core_stop(struct ath11k_base *ab)
{
if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
ath11k_qmi_firmware_stop(ab);
ath11k_hif_stop(ab);
ath11k_wmi_detach(ab);
ath11k_dp_pdev_reo_cleanup(ab);
/* De-Init of components as needed */
}
static int ath11k_core_soc_create(struct ath11k_base *ab)
{
int ret;
if (ath11k_ftm_mode) {
ab->fw_mode = ATH11K_FIRMWARE_MODE_FTM;
ath11k_info(ab, "Booting in factory test mode\n");
}
ret = ath11k_qmi_init_service(ab);
if (ret) {
ath11k_err(ab, "failed to initialize qmi :%d\n", ret);
return ret;
}
ret = ath11k_debugfs_soc_create(ab);
if (ret) {
ath11k_err(ab, "failed to create ath11k debugfs\n");
goto err_qmi_deinit;
}
ret = ath11k_hif_power_up(ab);
if (ret) {
ath11k_err(ab, "failed to power up :%d\n", ret);
goto err_debugfs_reg;
}
return 0;
err_debugfs_reg:
ath11k_debugfs_soc_destroy(ab);
err_qmi_deinit:
ath11k_qmi_deinit_service(ab);
return ret;
}
static void ath11k_core_soc_destroy(struct ath11k_base *ab)
{
ath11k_debugfs_soc_destroy(ab);
ath11k_dp_free(ab);
ath11k_reg_free(ab);
ath11k_qmi_deinit_service(ab);
}
static int ath11k_core_pdev_create(struct ath11k_base *ab)
{
int ret;
ret = ath11k_debugfs_pdev_create(ab);
if (ret) {
ath11k_err(ab, "failed to create core pdev debugfs: %d\n", ret);
return ret;
}
ret = ath11k_dp_pdev_alloc(ab);
if (ret) {
ath11k_err(ab, "failed to attach DP pdev: %d\n", ret);
goto err_pdev_debug;
}
ret = ath11k_mac_register(ab);
if (ret) {
ath11k_err(ab, "failed register the radio with mac80211: %d\n", ret);
goto err_dp_pdev_free;
}
ret = ath11k_thermal_register(ab);
if (ret) {
ath11k_err(ab, "could not register thermal device: %d\n",
ret);
goto err_mac_unregister;
}
ret = ath11k_spectral_init(ab);
if (ret) {
ath11k_err(ab, "failed to init spectral %d\n", ret);
goto err_thermal_unregister;
}
return 0;
err_thermal_unregister:
ath11k_thermal_unregister(ab);
err_mac_unregister:
ath11k_mac_unregister(ab);
err_dp_pdev_free:
ath11k_dp_pdev_free(ab);
err_pdev_debug:
ath11k_debugfs_pdev_destroy(ab);
return ret;
}
static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
{
ath11k_spectral_deinit(ab);
ath11k_thermal_unregister(ab);
ath11k_mac_unregister(ab);
ath11k_hif_irq_disable(ab);
ath11k_dp_pdev_free(ab);
ath11k_debugfs_pdev_destroy(ab);
}
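/* Bring-up order below: attach WMI, init HTC, start the HIF, wait for
 * the HTC target, connect the HTT and WMI services, start HTC, wait
 * for the WMI service-ready event, allocate mac80211 state and the REO
 * rings, send the WMI init command and wait for unified-ready.
 */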
static int ath11k_core_start(struct ath11k_base *ab)
{
int ret;
ret = ath11k_wmi_attach(ab);
if (ret) {
ath11k_err(ab, "failed to attach wmi: %d\n", ret);
return ret;
}
ret = ath11k_htc_init(ab);
if (ret) {
ath11k_err(ab, "failed to init htc: %d\n", ret);
goto err_wmi_detach;
}
ret = ath11k_hif_start(ab);
if (ret) {
ath11k_err(ab, "failed to start HIF: %d\n", ret);
goto err_wmi_detach;
}
ret = ath11k_htc_wait_target(&ab->htc);
if (ret) {
ath11k_err(ab, "failed to connect to HTC: %d\n", ret);
goto err_hif_stop;
}
ret = ath11k_dp_htt_connect(&ab->dp);
if (ret) {
ath11k_err(ab, "failed to connect to HTT: %d\n", ret);
goto err_hif_stop;
}
ret = ath11k_wmi_connect(ab);
if (ret) {
ath11k_err(ab, "failed to connect wmi: %d\n", ret);
goto err_hif_stop;
}
ret = ath11k_htc_start(&ab->htc);
if (ret) {
ath11k_err(ab, "failed to start HTC: %d\n", ret);
goto err_hif_stop;
}
ret = ath11k_wmi_wait_for_service_ready(ab);
if (ret) {
ath11k_err(ab, "failed to receive wmi service ready event: %d\n",
ret);
goto err_hif_stop;
}
ret = ath11k_mac_allocate(ab);
if (ret) {
ath11k_err(ab, "failed to create new hw device with mac80211 :%d\n",
ret);
goto err_hif_stop;
}
ath11k_dp_pdev_pre_alloc(ab);
ret = ath11k_dp_pdev_reo_setup(ab);
if (ret) {
ath11k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
goto err_mac_destroy;
}
ret = ath11k_wmi_cmd_init(ab);
if (ret) {
ath11k_err(ab, "failed to send wmi init cmd: %d\n", ret);
goto err_reo_cleanup;
}
ret = ath11k_wmi_wait_for_unified_ready(ab);
if (ret) {
ath11k_err(ab, "failed to receive wmi unified ready event: %d\n",
ret);
goto err_reo_cleanup;
}
/* put the hardware into DBS mode */
if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1) {
ret = ath11k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
if (ret) {
ath11k_err(ab, "failed to send dbs mode: %d\n", ret);
goto err_hif_stop;
}
}
ret = ath11k_dp_tx_htt_h2t_ver_req_msg(ab);
if (ret) {
ath11k_err(ab, "failed to send htt version request message: %d\n",
ret);
goto err_reo_cleanup;
}
return 0;
err_reo_cleanup:
ath11k_dp_pdev_reo_cleanup(ab);
err_mac_destroy:
ath11k_mac_destroy(ab);
err_hif_stop:
ath11k_hif_stop(ab);
err_wmi_detach:
ath11k_wmi_detach(ab);
return ret;
}
static int ath11k_core_start_firmware(struct ath11k_base *ab,
enum ath11k_firmware_mode mode)
{
int ret;
ath11k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v2,
&ab->qmi.ce_cfg.shadow_reg_v2_len);
ret = ath11k_qmi_firmware_start(ab, mode);
if (ret) {
ath11k_err(ab, "failed to send firmware start: %d\n", ret);
return ret;
}
return ret;
}
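/* Called once QMI reports the firmware is ready: start the firmware in
 * the configured mode, init the copy-engine pipes, allocate the
 * datapath, resolve the crypto/frame-mode flags, then start the core
 * and create the pdevs under core_lock.
 */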
int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
{
int ret;
ret = ath11k_core_start_firmware(ab, ab->fw_mode);
if (ret) {
ath11k_err(ab, "failed to start firmware: %d\n", ret);
return ret;
}
ret = ath11k_ce_init_pipes(ab);
if (ret) {
ath11k_err(ab, "failed to initialize CE: %d\n", ret);
goto err_firmware_stop;
}
ret = ath11k_dp_alloc(ab);
if (ret) {
ath11k_err(ab, "failed to init DP: %d\n", ret);
goto err_firmware_stop;
}
switch (ath11k_crypto_mode) {
case ATH11K_CRYPT_MODE_SW:
set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
break;
case ATH11K_CRYPT_MODE_HW:
clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
clear_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
break;
default:
ath11k_info(ab, "invalid crypto_mode: %d\n", ath11k_crypto_mode);
return -EINVAL;
}
if (ath11k_frame_mode == ATH11K_HW_TXRX_RAW)
set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
mutex_lock(&ab->core_lock);
ret = ath11k_core_start(ab);
if (ret) {
ath11k_err(ab, "failed to start core: %d\n", ret);
goto err_dp_free;
}
ret = ath11k_core_pdev_create(ab);
if (ret) {
ath11k_err(ab, "failed to create pdev core: %d\n", ret);
goto err_core_stop;
}
ath11k_hif_irq_enable(ab);
mutex_unlock(&ab->core_lock);
return 0;
err_core_stop:
ath11k_core_stop(ab);
ath11k_mac_destroy(ab);
err_dp_free:
ath11k_dp_free(ab);
mutex_unlock(&ab->core_lock);
err_firmware_stop:
ath11k_qmi_firmware_stop(ab);
return ret;
}
static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
{
int ret;
mutex_lock(&ab->core_lock);
ath11k_thermal_unregister(ab);
ath11k_hif_irq_disable(ab);
ath11k_dp_pdev_free(ab);
ath11k_spectral_deinit(ab);
ath11k_hif_stop(ab);
ath11k_wmi_detach(ab);
ath11k_dp_pdev_reo_cleanup(ab);
mutex_unlock(&ab->core_lock);
ath11k_dp_free(ab);
ath11k_hal_srng_deinit(ab);
ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1;
ret = ath11k_hal_srng_init(ab);
if (ret)
return ret;
clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
ret = ath11k_core_qmi_firmware_ready(ab);
if (ret)
goto err_hal_srng_deinit;
clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
return 0;
err_hal_srng_deinit:
ath11k_hal_srng_deinit(ab);
return ret;
}
void ath11k_core_halt(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
lockdep_assert_held(&ar->conf_mutex);
ar->num_created_vdevs = 0;
ar->allocated_vdev_map = 0;
ath11k_mac_scan_finish(ar);
ath11k_mac_peer_cleanup_all(ar);
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ab->update_11d_work);
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
synchronize_rcu();
INIT_LIST_HEAD(&ar->arvifs);
idr_init(&ar->txmgmt_idr);
}
static void ath11k_update_11d(struct work_struct *work)
{
struct ath11k_base *ab = container_of(work, struct ath11k_base, update_11d_work);
struct ath11k *ar;
struct ath11k_pdev *pdev;
struct wmi_set_current_country_params set_current_param = {};
int ret, i;
spin_lock_bh(&ab->base_lock);
memcpy(&set_current_param.alpha2, &ab->new_alpha2, 2);
spin_unlock_bh(&ab->base_lock);
ath11k_dbg(ab, ATH11K_DBG_WMI, "update 11d new cc %c%c\n",
set_current_param.alpha2[0],
set_current_param.alpha2[1]);
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
memcpy(&ar->alpha2, &set_current_param.alpha2, 2);
ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
if (ret)
ath11k_warn(ar->ab,
"pdev id %d failed set current country code: %d\n",
i, ret);
}
}
void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab)
{
struct ath11k *ar;
struct ath11k_pdev *pdev;
int i;
spin_lock_bh(&ab->base_lock);
ab->stats.fw_crash_counter++;
spin_unlock_bh(&ab->base_lock);
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (!ar || ar->state == ATH11K_STATE_OFF ||
ar->state == ATH11K_STATE_FTM)
continue;
ieee80211_stop_queues(ar->hw);
ath11k_mac_drain_tx(ar);
ar->state_11d = ATH11K_11D_IDLE;
complete(&ar->completed_11d_scan);
complete(&ar->scan.started);
complete_all(&ar->scan.completed);
complete(&ar->scan.on_channel);
complete(&ar->peer_assoc_done);
complete(&ar->peer_delete_done);
complete(&ar->install_key_done);
complete(&ar->vdev_setup_done);
complete(&ar->vdev_delete_done);
complete(&ar->bss_survey_done);
complete(&ar->thermal.wmi_sync);
wake_up(&ar->dp.tx_empty_waitq);
idr_for_each(&ar->txmgmt_idr,
ath11k_mac_tx_mgmt_pending_free, ar);
idr_destroy(&ar->txmgmt_idr);
wake_up(&ar->txmgmt_empty_waitq);
ar->monitor_vdev_id = -1;
clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
}
wake_up(&ab->wmi_ab.tx_credits_wq);
wake_up(&ab->peer_mapping_wq);
reinit_completion(&ab->driver_recovery);
}
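/* Per-radio restart state machine: ON moves to RESTARTING and kicks
 * ieee80211_restart_hw(); a radio still in RESTARTED here means a
 * previous restart never completed, so it is marked WEDGED and left
 * alone; OFF radios are skipped and FTM radios only log completion.
 */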
static void ath11k_core_post_reconfigure_recovery(struct ath11k_base *ab)
{
struct ath11k *ar;
struct ath11k_pdev *pdev;
int i;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (!ar || ar->state == ATH11K_STATE_OFF)
continue;
mutex_lock(&ar->conf_mutex);
switch (ar->state) {
case ATH11K_STATE_ON:
ar->state = ATH11K_STATE_RESTARTING;
ath11k_core_halt(ar);
ieee80211_restart_hw(ar->hw);
break;
case ATH11K_STATE_OFF:
ath11k_warn(ab,
"cannot restart radio %d that hasn't been started\n",
i);
break;
case ATH11K_STATE_RESTARTING:
break;
case ATH11K_STATE_RESTARTED:
ar->state = ATH11K_STATE_WEDGED;
fallthrough;
case ATH11K_STATE_WEDGED:
ath11k_warn(ab,
"device is wedged, will not restart radio %d\n", i);
break;
case ATH11K_STATE_FTM:
ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
"fw mode reset done radio %d\n", i);
break;
}
mutex_unlock(&ar->conf_mutex);
}
complete(&ab->driver_recovery);
}
static void ath11k_core_restart(struct work_struct *work)
{
struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work);
int ret;
ret = ath11k_core_reconfigure_on_crash(ab);
if (ret) {
ath11k_err(ab, "failed to reconfigure driver on crash recovery\n");
return;
}
if (ab->is_reset)
complete_all(&ab->reconfigure_complete);
if (!ab->is_reset)
ath11k_core_post_reconfigure_recovery(ab);
}
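/* Reset worker. Recovery attempts are rate limited: after
 * ATH11K_RESET_MAX_FAIL_COUNT_FIRST consecutive failures a reset is
 * only retried once reset_fail_timeout has expired, and after
 * ATH11K_RESET_MAX_FAIL_COUNT_FINAL failures no further reset is
 * attempted at all.
 */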
static void ath11k_core_reset(struct work_struct *work)
{
struct ath11k_base *ab = container_of(work, struct ath11k_base, reset_work);
int reset_count, fail_cont_count;
long time_left;
if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))) {
ath11k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
return;
}
/* Sometimes recovery fails and then every subsequent attempt fails as
 * well; bail out here to avoid endlessly retrying a recovery that
 * cannot succeed.
 */
fail_cont_count = atomic_read(&ab->fail_cont_count);
if (fail_cont_count >= ATH11K_RESET_MAX_FAIL_COUNT_FINAL)
return;
if (fail_cont_count >= ATH11K_RESET_MAX_FAIL_COUNT_FIRST &&
time_before(jiffies, ab->reset_fail_timeout))
return;
reset_count = atomic_inc_return(&ab->reset_count);
if (reset_count > 1) {
/* Another reset worker may be scheduled before the previous one has
 * completed, and it would tear down state the previous worker is still
 * using. Wait for the previous reset to finish to avoid that.
 */
ath11k_warn(ab, "already resetting count %d\n", reset_count);
reinit_completion(&ab->reset_complete);
time_left = wait_for_completion_timeout(&ab->reset_complete,
ATH11K_RESET_TIMEOUT_HZ);
if (time_left) {
ath11k_dbg(ab, ATH11K_DBG_BOOT, "to skip reset\n");
atomic_dec(&ab->reset_count);
return;
}
ab->reset_fail_timeout = jiffies + ATH11K_RESET_FAIL_TIMEOUT_HZ;
/* Record the consecutive recovery failure count when recovery fails */
atomic_inc(&ab->fail_cont_count);
}
ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset starting\n");
ab->is_reset = true;
atomic_set(&ab->recovery_count, 0);
reinit_completion(&ab->recovery_start);
atomic_set(&ab->recovery_start_count, 0);
ath11k_core_pre_reconfigure_recovery(ab);
reinit_completion(&ab->reconfigure_complete);
ath11k_core_post_reconfigure_recovery(ab);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "waiting recovery start...\n");
time_left = wait_for_completion_timeout(&ab->recovery_start,
ATH11K_RECOVER_START_TIMEOUT_HZ);
ath11k_hif_power_down(ab);
ath11k_hif_power_up(ab);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset started\n");
}
static int ath11k_init_hw_params(struct ath11k_base *ab)
{
const struct ath11k_hw_params *hw_params = NULL;
int i;
for (i = 0; i < ARRAY_SIZE(ath11k_hw_params); i++) {
hw_params = &ath11k_hw_params[i];
if (hw_params->hw_rev == ab->hw_rev)
break;
}
if (i == ARRAY_SIZE(ath11k_hw_params)) {
ath11k_err(ab, "Unsupported hardware version: 0x%x\n", ab->hw_rev);
return -EINVAL;
}
ab->hw_params = *hw_params;
ath11k_info(ab, "%s\n", ab->hw_params.name);
return 0;
}
int ath11k_core_pre_init(struct ath11k_base *ab)
{
int ret;
ret = ath11k_init_hw_params(ab);
if (ret) {
ath11k_err(ab, "failed to get hw params: %d\n", ret);
return ret;
}
return 0;
}
EXPORT_SYMBOL(ath11k_core_pre_init);
int ath11k_core_init(struct ath11k_base *ab)
{
int ret;
ret = ath11k_core_soc_create(ab);
if (ret) {
ath11k_err(ab, "failed to create soc core: %d\n", ret);
return ret;
}
return 0;
}
EXPORT_SYMBOL(ath11k_core_init);
void ath11k_core_deinit(struct ath11k_base *ab)
{
mutex_lock(&ab->core_lock);
ath11k_core_pdev_destroy(ab);
ath11k_core_stop(ab);
mutex_unlock(&ab->core_lock);
ath11k_hif_power_down(ab);
ath11k_mac_destroy(ab);
ath11k_core_soc_destroy(ab);
}
EXPORT_SYMBOL(ath11k_core_deinit);
void ath11k_core_free(struct ath11k_base *ab)
{
destroy_workqueue(ab->workqueue_aux);
destroy_workqueue(ab->workqueue);
kfree(ab);
}
EXPORT_SYMBOL(ath11k_core_free);
struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
enum ath11k_bus bus)
{
struct ath11k_base *ab;
ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
if (!ab)
return NULL;
init_completion(&ab->driver_recovery);
ab->workqueue = create_singlethread_workqueue("ath11k_wq");
if (!ab->workqueue)
goto err_sc_free;
ab->workqueue_aux = create_singlethread_workqueue("ath11k_aux_wq");
if (!ab->workqueue_aux)
goto err_free_wq;
mutex_init(&ab->core_lock);
mutex_init(&ab->tbl_mtx_lock);
spin_lock_init(&ab->base_lock);
mutex_init(&ab->vdev_id_11d_lock);
init_completion(&ab->reset_complete);
init_completion(&ab->reconfigure_complete);
init_completion(&ab->recovery_start);
INIT_LIST_HEAD(&ab->peers);
init_waitqueue_head(&ab->peer_mapping_wq);
init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
init_waitqueue_head(&ab->qmi.cold_boot_waitq);
INIT_WORK(&ab->restart_work, ath11k_core_restart);
INIT_WORK(&ab->update_11d_work, ath11k_update_11d);
INIT_WORK(&ab->reset_work, ath11k_core_reset);
timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
init_completion(&ab->wow.wakeup_completed);
ab->dev = dev;
ab->hif.bus = bus;
return ab;
err_free_wq:
destroy_workqueue(ab->workqueue);
err_sc_free:
kfree(ab);
return NULL;
}
EXPORT_SYMBOL(ath11k_core_alloc);
MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ax wireless LAN cards.");
MODULE_LICENSE("Dual BSD/GPL");
|
linux-master
|
drivers/net/wireless/ath/ath11k/core.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include <linux/vmalloc.h>
#include "core.h"
#include "debug.h"
void ath11k_info(struct ath11k_base *ab, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
va_start(args, fmt);
vaf.va = &args;
dev_info(ab->dev, "%pV", &vaf);
trace_ath11k_log_info(ab, &vaf);
va_end(args);
}
EXPORT_SYMBOL(ath11k_info);
void ath11k_err(struct ath11k_base *ab, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
va_start(args, fmt);
vaf.va = &args;
dev_err(ab->dev, "%pV", &vaf);
trace_ath11k_log_err(ab, &vaf);
va_end(args);
}
EXPORT_SYMBOL(ath11k_err);
void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
va_start(args, fmt);
vaf.va = &args;
dev_warn_ratelimited(ab->dev, "%pV", &vaf);
trace_ath11k_log_warn(ab, &vaf);
va_end(args);
}
EXPORT_SYMBOL(ath11k_warn);
#ifdef CONFIG_ATH11K_DEBUG
void __ath11k_dbg(struct ath11k_base *ab, enum ath11k_debug_mask mask,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
if (ath11k_debug_mask & mask)
dev_printk(KERN_DEBUG, ab->dev, "%s %pV", ath11k_dbg_str(mask), &vaf);
trace_ath11k_log_dbg(ab, mask, &vaf);
va_end(args);
}
EXPORT_SYMBOL(__ath11k_dbg);
void ath11k_dbg_dump(struct ath11k_base *ab,
enum ath11k_debug_mask mask,
const char *msg, const char *prefix,
const void *buf, size_t len)
{
char linebuf[256];
size_t linebuflen;
const void *ptr;
if (ath11k_debug_mask & mask) {
if (msg)
__ath11k_dbg(ab, mask, "%s\n", msg);
for (ptr = buf; (ptr - buf) < len; ptr += 16) {
linebuflen = 0;
linebuflen += scnprintf(linebuf + linebuflen,
sizeof(linebuf) - linebuflen,
"%s%08x: ",
(prefix ? prefix : ""),
(unsigned int)(ptr - buf));
hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1,
linebuf + linebuflen,
sizeof(linebuf) - linebuflen, true);
dev_printk(KERN_DEBUG, ab->dev, "%s\n", linebuf);
}
}
/* tracing code doesn't like null strings */
trace_ath11k_log_dbg_dump(ab, msg ? msg : "", prefix ? prefix : "",
buf, len);
}
EXPORT_SYMBOL(ath11k_dbg_dump);
#endif /* CONFIG_ATH11K_DEBUG */
|
linux-master
|
drivers/net/wireless/ath/ath11k/debug.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "debugfs_htt_stats.h"
#include "debugfs_sta.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"
#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
static inline
u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
}
static inline
enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
return HAL_ENCRYPT_TYPE_OPEN;
return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
}
static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
}
static inline
bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
}
static inline
u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
}
static inline
bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}
static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
}
static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
return ieee80211_has_morefrags(hdr->frame_control);
}
static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}
static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
}
static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
}
static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
{
return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
__le32_to_cpu(attn->info2));
}
static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
{
return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
__le32_to_cpu(attn->info1));
}
static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
{
return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
__le32_to_cpu(attn->info1));
}
static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
{
return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
__le32_to_cpu(attn->info2)) ==
RX_DESC_DECRYPT_STATUS_CODE_OK);
}
static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
{
u32 info = __le32_to_cpu(attn->info1);
u32 errmap = 0;
if (info & RX_ATTENTION_INFO1_FCS_ERR)
errmap |= DP_RX_MPDU_ERR_FCS;
if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
errmap |= DP_RX_MPDU_ERR_DECRYPT;
if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
errmap |= DP_RX_MPDU_ERR_TKIP_MIC;
if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;
if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
errmap |= DP_RX_MPDU_ERR_OVERFLOW;
if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
errmap |= DP_RX_MPDU_ERR_MSDU_LEN;
if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
errmap |= DP_RX_MPDU_ERR_MPDU_LEN;
return errmap;
}
static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
struct rx_attention *rx_attention;
u32 errmap;
rx_attention = ath11k_dp_rx_get_attention(ab, desc);
errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
}
static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
}
static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
}
static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
}
static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
}
static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
}
static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
}
static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
}
static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
}
static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
}
static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
}
static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
}
static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc);
}
static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
struct hal_rx_desc *fdesc,
struct hal_rx_desc *ldesc)
{
ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
}
static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
{
return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
__le32_to_cpu(attn->info1));
}
static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
struct hal_rx_desc *rx_desc)
{
u8 *rx_pkt_hdr;
rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc);
return rx_pkt_hdr;
}
static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
struct hal_rx_desc *rx_desc)
{
u32 tlv_tag;
tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc);
return tlv_tag == HAL_RX_MPDU_START;
}
static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
struct hal_rx_desc *rx_desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
}
static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
struct hal_rx_desc *desc,
u16 len)
{
ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
}
static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);
return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
(!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
__le32_to_cpu(attn->info1)));
}
static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc);
}
static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc);
}
static void ath11k_dp_service_mon_ring(struct timer_list *t)
{
struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
int i;
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);
mod_timer(&ab->mon_reap_timer, jiffies +
msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
}
static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
{
int i, reaped = 0;
unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
do {
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
reaped += ath11k_dp_rx_process_mon_rings(ab, i,
NULL,
DP_MON_SERVICE_BUDGET);
/* nothing more to reap */
if (reaped < DP_MON_SERVICE_BUDGET)
return 0;
} while (time_before(jiffies, timeout));
ath11k_warn(ab, "dp mon ring purge timeout");
return -ETIMEDOUT;
}
/* Returns number of Rx buffers replenished */
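/* Each ring entry carries a cookie encoding both the pdev id and the
 * IDR buffer id:
 *   cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
 *            FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
 * so the skb can be looked up again when the hardware returns the
 * buffer.
 */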
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
struct dp_rxdma_ring *rx_ring,
int req_entries,
enum hal_rx_buf_return_buf_manager mgr)
{
struct hal_srng *srng;
u32 *desc;
struct sk_buff *skb;
int num_free;
int num_remain;
int buf_id;
u32 cookie;
dma_addr_t paddr;
req_entries = min(req_entries, rx_ring->bufs_max);
srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
spin_lock_bh(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
req_entries = num_free;
req_entries = min(num_free, req_entries);
num_remain = req_entries;
while (num_remain > 0) {
skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
DP_RX_BUFFER_ALIGN_SIZE);
if (!skb)
break;
if (!IS_ALIGNED((unsigned long)skb->data,
DP_RX_BUFFER_ALIGN_SIZE)) {
skb_pull(skb,
PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
skb->data);
}
paddr = dma_map_single(ab->dev, skb->data,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
if (dma_mapping_error(ab->dev, paddr))
goto fail_free_skb;
spin_lock_bh(&rx_ring->idr_lock);
buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1,
(rx_ring->bufs_max * 3) + 1, GFP_ATOMIC);
spin_unlock_bh(&rx_ring->idr_lock);
if (buf_id <= 0)
goto fail_dma_unmap;
desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
if (!desc)
goto fail_idr_remove;
ATH11K_SKB_RXCB(skb)->paddr = paddr;
cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
num_remain--;
ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
}
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return req_entries - num_remain;
fail_idr_remove:
spin_lock_bh(&rx_ring->idr_lock);
idr_remove(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
fail_dma_unmap:
dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
fail_free_skb:
dev_kfree_skb_any(skb);
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return req_entries - num_remain;
}
static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
struct dp_rxdma_ring *rx_ring)
{
struct sk_buff *skb;
int buf_id;
spin_lock_bh(&rx_ring->idr_lock);
idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
idr_remove(&rx_ring->bufs_idr, buf_id);
/* TODO: Understand where internal driver does this dma_unmap
* of rxdma_buffer.
*/
dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
idr_destroy(&rx_ring->bufs_idr);
spin_unlock_bh(&rx_ring->idr_lock);
return 0;
}
static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_base *ab = ar->ab;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
int i;
ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
rx_ring = &dp->rxdma_mon_buf_ring;
ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
rx_ring = &dp->rx_mon_status_refill_ring[i];
ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
}
return 0;
}
static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
struct dp_rxdma_ring *rx_ring,
u32 ringtype)
{
struct ath11k_pdev_dp *dp = &ar->dp;
int num_entries;
num_entries = rx_ring->refill_buf_ring.size /
ath11k_hal_srng_get_entrysize(ar->ab, ringtype);
rx_ring->bufs_max = num_entries;
ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
ar->ab->hw_params.hal_params->rx_buf_rbm);
return 0;
}
static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_base *ab = ar->ab;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
int i;
ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);
if (ar->ab->hw_params.rxdma1_enable) {
rx_ring = &dp->rxdma_mon_buf_ring;
ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
}
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
rx_ring = &dp->rx_mon_status_refill_ring[i];
ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
}
return 0;
}
static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_base *ab = ar->ab;
int i;
ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
if (ab->hw_params.rx_mac_buf_ring)
ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
ath11k_dp_srng_cleanup(ab,
&dp->rx_mon_status_refill_ring[i].refill_buf_ring);
}
ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
}
void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
{
struct ath11k_dp *dp = &ab->dp;
int i;
for (i = 0; i < DP_REO_DST_RING_MAX; i++)
ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}
int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
{
struct ath11k_dp *dp = &ab->dp;
int ret;
int i;
for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
HAL_REO_DST, i, 0,
DP_REO_DST_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to setup reo_dst_ring\n");
goto err_reo_cleanup;
}
}
return 0;
err_reo_cleanup:
ath11k_dp_pdev_reo_cleanup(ab);
return ret;
}
static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_base *ab = ar->ab;
struct dp_srng *srng = NULL;
int i;
int ret;
ret = ath11k_dp_srng_setup(ar->ab,
&dp->rx_refill_buf_ring.refill_buf_ring,
HAL_RXDMA_BUF, 0,
dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
if (ret) {
ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
return ret;
}
if (ar->ab->hw_params.rx_mac_buf_ring) {
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
ret = ath11k_dp_srng_setup(ar->ab,
&dp->rx_mac_buf_ring[i],
HAL_RXDMA_BUF, 1,
dp->mac_id + i, 1024);
if (ret) {
ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
i);
return ret;
}
}
}
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
HAL_RXDMA_DST, 0, dp->mac_id + i,
DP_RXDMA_ERR_DST_RING_SIZE);
if (ret) {
ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
return ret;
}
}
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
ret = ath11k_dp_srng_setup(ar->ab,
srng,
HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
DP_RXDMA_MON_STATUS_RING_SIZE);
if (ret) {
ath11k_warn(ar->ab,
"failed to setup rx_mon_status_refill_ring %d\n", i);
return ret;
}
}
/* If rxdma1_enable is false, there is no need to set up
 * rxdma_mon_buf_ring, rxdma_mon_dst_ring and rxdma_mon_desc_ring;
 * only the mon status reap timer is needed (e.g. on QCA6390).
 */
if (!ar->ab->hw_params.rxdma1_enable) {
/* init mon status buffer reap timer */
timer_setup(&ar->ab->mon_reap_timer,
ath11k_dp_service_mon_ring, 0);
return 0;
}
ret = ath11k_dp_srng_setup(ar->ab,
&dp->rxdma_mon_buf_ring.refill_buf_ring,
HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
DP_RXDMA_MONITOR_BUF_RING_SIZE);
if (ret) {
ath11k_warn(ar->ab,
"failed to setup HAL_RXDMA_MONITOR_BUF\n");
return ret;
}
ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
DP_RXDMA_MONITOR_DST_RING_SIZE);
if (ret) {
ath11k_warn(ar->ab,
"failed to setup HAL_RXDMA_MONITOR_DST\n");
return ret;
}
ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
DP_RXDMA_MONITOR_DESC_RING_SIZE);
if (ret) {
ath11k_warn(ar->ab,
"failed to setup HAL_RXDMA_MONITOR_DESC\n");
return ret;
}
return 0;
}
void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
{
struct ath11k_dp *dp = &ab->dp;
struct dp_reo_cmd *cmd, *tmp;
struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
struct dp_rx_tid *rx_tid;
spin_lock_bh(&dp->reo_cmd_lock);
list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
list_del(&cmd->list);
rx_tid = &cmd->data;
if (rx_tid->vaddr) {
dma_unmap_single(ab->dev, rx_tid->paddr,
rx_tid->size, DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
}
kfree(cmd);
}
list_for_each_entry_safe(cmd_cache, tmp_cache,
&dp->reo_cmd_cache_flush_list, list) {
list_del(&cmd_cache->list);
dp->reo_cmd_cache_flush_count--;
rx_tid = &cmd_cache->data;
if (rx_tid->vaddr) {
dma_unmap_single(ab->dev, rx_tid->paddr,
rx_tid->size, DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
}
kfree(cmd_cache);
}
spin_unlock_bh(&dp->reo_cmd_lock);
}
static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
enum hal_reo_cmd_status status)
{
struct dp_rx_tid *rx_tid = ctx;
if (status != HAL_REO_CMD_SUCCESS)
ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
rx_tid->tid, status);
if (rx_tid->vaddr) {
dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
}
}
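/* The REO queue descriptor can be larger than a single flushable unit,
 * so the flush below is issued chunk by chunk, one
 * ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID) sized chunk at
 * a time without status; only the final flush of the base address
 * requests a status callback (ath11k_dp_reo_cmd_free) to release the
 * host memory.
 */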
static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
struct dp_rx_tid *rx_tid)
{
struct ath11k_hal_reo_cmd cmd = {0};
unsigned long tot_desc_sz, desc_sz;
int ret;
tot_desc_sz = rx_tid->size;
desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
while (tot_desc_sz > desc_sz) {
tot_desc_sz -= desc_sz;
cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
cmd.addr_hi = upper_32_bits(rx_tid->paddr);
ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
HAL_REO_CMD_FLUSH_CACHE, &cmd,
NULL);
if (ret)
ath11k_warn(ab,
"failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
rx_tid->tid, ret);
}
memset(&cmd, 0, sizeof(cmd));
cmd.addr_lo = lower_32_bits(rx_tid->paddr);
cmd.addr_hi = upper_32_bits(rx_tid->paddr);
cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
HAL_REO_CMD_FLUSH_CACHE,
&cmd, ath11k_dp_reo_cmd_free);
if (ret) {
ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
rx_tid->tid, ret);
dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
}
}
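/* Completion handler for the RX queue update issued on tid deletion.
 * The descriptor is not freed immediately: it is queued on
 * reo_cmd_cache_flush_list and only flushed and freed once the list
 * grows past DP_REO_DESC_FREE_THRESHOLD entries or an entry is older
 * than DP_REO_DESC_FREE_TIMEOUT_MS, giving the hardware time to stop
 * referencing it.
 */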
static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
enum hal_reo_cmd_status status)
{
struct ath11k_base *ab = dp->ab;
struct dp_rx_tid *rx_tid = ctx;
struct dp_reo_cache_flush_elem *elem, *tmp;
if (status == HAL_REO_CMD_DRAIN) {
goto free_desc;
} else if (status != HAL_REO_CMD_SUCCESS) {
/* Shouldn't happen! Cleanup in case of other failure? */
ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
rx_tid->tid, status);
return;
}
elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
if (!elem)
goto free_desc;
elem->ts = jiffies;
memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
spin_lock_bh(&dp->reo_cmd_lock);
list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
dp->reo_cmd_cache_flush_count++;
/* Flush and invalidate aged REO desc from HW cache */
list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
list) {
if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
time_after(jiffies, elem->ts +
msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
list_del(&elem->list);
dp->reo_cmd_cache_flush_count--;
spin_unlock_bh(&dp->reo_cmd_lock);
ath11k_dp_reo_cache_flush(ab, &elem->data);
kfree(elem);
spin_lock_bh(&dp->reo_cmd_lock);
}
}
spin_unlock_bh(&dp->reo_cmd_lock);
return;
free_desc:
dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
}
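/* Invalidate a peer's rx tid queue: clear the VLD bit via UPDATE_RX_QUEUE
 * and let ath11k_dp_rx_tid_del_func() release the memory on completion.
 */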
void ath11k_peer_rx_tid_delete(struct ath11k *ar,
struct ath11k_peer *peer, u8 tid)
{
struct ath11k_hal_reo_cmd cmd = {0};
struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
int ret;
if (!rx_tid->active)
return;
rx_tid->active = false;
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
cmd.addr_lo = lower_32_bits(rx_tid->paddr);
cmd.addr_hi = upper_32_bits(rx_tid->paddr);
cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
ath11k_dp_rx_tid_del_func);
if (ret) {
if (ret != -ESHUTDOWN)
ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
tid, ret);
dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
}
rx_tid->paddr = 0;
rx_tid->size = 0;
}
static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
u32 *link_desc,
enum hal_wbm_rel_bm_act action)
{
struct ath11k_dp *dp = &ab->dp;
struct hal_srng *srng;
u32 *desc;
int ret = 0;
srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
spin_lock_bh(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
if (!desc) {
ret = -ENOBUFS;
goto exit;
}
ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
action);
exit:
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return ret;
}
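/* Reset the defragmentation state of an rx tid, optionally returning the
 * held link descriptor to the WBM idle list.
 */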
static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
{
struct ath11k_base *ab = rx_tid->ab;
lockdep_assert_held(&ab->base_lock);
if (rx_tid->dst_ring_desc) {
if (rel_link_desc)
ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
kfree(rx_tid->dst_ring_desc);
rx_tid->dst_ring_desc = NULL;
}
rx_tid->cur_sn = 0;
rx_tid->last_frag_no = 0;
rx_tid->rx_frag_bitmap = 0;
__skb_queue_purge(&rx_tid->rx_frags);
}
void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
{
struct dp_rx_tid *rx_tid;
int i;
lockdep_assert_held(&ar->ab->base_lock);
for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
rx_tid = &peer->rx_tid[i];
spin_unlock_bh(&ar->ab->base_lock);
del_timer_sync(&rx_tid->frag_timer);
spin_lock_bh(&ar->ab->base_lock);
ath11k_dp_rx_frags_cleanup(rx_tid, true);
}
}
void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
struct dp_rx_tid *rx_tid;
int i;
lockdep_assert_held(&ar->ab->base_lock);
for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
rx_tid = &peer->rx_tid[i];
ath11k_peer_rx_tid_delete(ar, peer, i);
ath11k_dp_rx_frags_cleanup(rx_tid, true);
spin_unlock_bh(&ar->ab->base_lock);
del_timer_sync(&rx_tid->frag_timer);
spin_lock_bh(&ar->ab->base_lock);
}
}
static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
struct ath11k_peer *peer,
struct dp_rx_tid *rx_tid,
u32 ba_win_sz, u16 ssn,
bool update_ssn)
{
struct ath11k_hal_reo_cmd cmd = {0};
int ret;
cmd.addr_lo = lower_32_bits(rx_tid->paddr);
cmd.addr_hi = upper_32_bits(rx_tid->paddr);
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
cmd.ba_window_size = ba_win_sz;
if (update_ssn) {
cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
}
ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
NULL);
if (ret) {
ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
rx_tid->tid, ret);
return ret;
}
rx_tid->ba_win_sz = ba_win_sz;
return 0;
}
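/* Free the host memory backing a peer's rx tid queue and mark the tid
 * inactive.
 */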
static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
const u8 *peer_mac, int vdev_id, u8 tid)
{
struct ath11k_peer *peer;
struct dp_rx_tid *rx_tid;
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, vdev_id, peer_mac);
if (!peer) {
ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
goto unlock_exit;
}
rx_tid = &peer->rx_tid[tid];
if (!rx_tid->active)
goto unlock_exit;
dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
rx_tid->active = false;
unlock_exit:
spin_unlock_bh(&ab->base_lock);
}
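/* Allocate and map an aligned REO queue descriptor for the given tid, then
 * point the firmware at it via WMI. An already active tid is only updated.
 */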
int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
u8 tid, u32 ba_win_sz, u16 ssn,
enum hal_pn_type pn_type)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_peer *peer;
struct dp_rx_tid *rx_tid;
u32 hw_desc_sz;
u32 *addr_aligned;
void *vaddr;
dma_addr_t paddr;
int ret;
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, vdev_id, peer_mac);
if (!peer) {
ath11k_warn(ab, "failed to find the peer %pM to set up rx tid\n",
peer_mac);
spin_unlock_bh(&ab->base_lock);
return -ENOENT;
}
rx_tid = &peer->rx_tid[tid];
/* Update the tid queue if it is already setup */
if (rx_tid->active) {
paddr = rx_tid->paddr;
ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
ba_win_sz, ssn, true);
spin_unlock_bh(&ab->base_lock);
if (ret) {
ath11k_warn(ab, "failed to update reo for peer %pM rx tid %d\n: %d",
peer_mac, tid, ret);
return ret;
}
ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
peer_mac, paddr,
tid, 1, ba_win_sz);
if (ret)
ath11k_warn(ab, "failed to send wmi rx reorder queue for peer %pM tid %d: %d\n",
peer_mac, tid, ret);
return ret;
}
rx_tid->tid = tid;
rx_tid->ba_win_sz = ba_win_sz;
/* TODO: Optimize the memory allocation for qos tid based on
* the actual BA window size in REO tid update path.
*/
if (tid == HAL_DESC_REO_NON_QOS_TID)
hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
else
hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
if (!vaddr) {
spin_unlock_bh(&ab->base_lock);
return -ENOMEM;
}
addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
ssn, pn_type);
paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
DMA_BIDIRECTIONAL);
ret = dma_mapping_error(ab->dev, paddr);
if (ret) {
spin_unlock_bh(&ab->base_lock);
ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n",
peer_mac, tid, ret);
goto err_mem_free;
}
rx_tid->vaddr = vaddr;
rx_tid->paddr = paddr;
rx_tid->size = hw_desc_sz;
rx_tid->active = true;
spin_unlock_bh(&ab->base_lock);
ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
paddr, tid, 1, ba_win_sz);
if (ret) {
ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
peer_mac, tid, ret);
ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
}
return ret;
err_mem_free:
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
return ret;
}
int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
struct ieee80211_ampdu_params *params)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
int vdev_id = arsta->arvif->vdev_id;
int ret;
ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
params->tid, params->buf_size,
params->ssn, arsta->pn_type);
if (ret)
ath11k_warn(ab, "failed to setup rx tid %d\n", ret);
return ret;
}
int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
struct ieee80211_ampdu_params *params)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_peer *peer;
struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
int vdev_id = arsta->arvif->vdev_id;
dma_addr_t paddr;
bool active;
int ret;
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
if (!peer) {
ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
spin_unlock_bh(&ab->base_lock);
return -ENOENT;
}
paddr = peer->rx_tid[params->tid].paddr;
active = peer->rx_tid[params->tid].active;
if (!active) {
spin_unlock_bh(&ab->base_lock);
return 0;
}
ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
spin_unlock_bh(&ab->base_lock);
if (ret) {
ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
params->tid, ret);
return ret;
}
ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
params->sta->addr, paddr,
params->tid, 1, 1);
if (ret)
ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
ret);
return ret;
}
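/* Enable hw PN/TSC replay checking on all active rx tid queues of a peer
 * when a pairwise key is installed; bcast/mcast stays with mac80211.
 */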
int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
const u8 *peer_addr,
enum set_key_cmd key_cmd,
struct ieee80211_key_conf *key)
{
struct ath11k *ar = arvif->ar;
struct ath11k_base *ab = ar->ab;
struct ath11k_hal_reo_cmd cmd = {0};
struct ath11k_peer *peer;
struct dp_rx_tid *rx_tid;
u8 tid;
int ret = 0;
/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
* We use mac80211 PN/TSC replay check functionality for bcast/mcast
* for now.
*/
if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
return 0;
cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
HAL_REO_CMD_UPD0_PN_SIZE |
HAL_REO_CMD_UPD0_PN_VALID |
HAL_REO_CMD_UPD0_PN_CHECK |
HAL_REO_CMD_UPD0_SVLD;
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
if (key_cmd == SET_KEY) {
cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
cmd.pn_size = 48;
}
break;
default:
break;
}
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
if (!peer) {
ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
spin_unlock_bh(&ab->base_lock);
return -ENOENT;
}
for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
rx_tid = &peer->rx_tid[tid];
if (!rx_tid->active)
continue;
cmd.addr_lo = lower_32_bits(rx_tid->paddr);
cmd.addr_hi = upper_32_bits(rx_tid->paddr);
ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
HAL_REO_CMD_UPDATE_RX_QUEUE,
&cmd, NULL);
if (ret) {
ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
tid, ret);
break;
}
}
spin_unlock_bh(&ab->base_lock);
return ret;
}
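/* Return the slot matching peer_id in the PPDU user stats array, or the
 * first free slot; -EINVAL if the array is full.
 */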
static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
u16 peer_id)
{
int i;
for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
if (ppdu_stats->user_stats[i].is_valid_peer_id) {
if (peer_id == ppdu_stats->user_stats[i].peer_id)
return i;
} else {
return i;
}
}
return -EINVAL;
}
static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
u16 tag, u16 len, const void *ptr,
void *data)
{
struct htt_ppdu_stats_info *ppdu_info;
struct htt_ppdu_user_stats *user_stats;
int cur_user;
u16 peer_id;
ppdu_info = (struct htt_ppdu_stats_info *)data;
switch (tag) {
case HTT_PPDU_STATS_TAG_COMMON:
if (len < sizeof(struct htt_ppdu_stats_common)) {
ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
len, tag);
return -EINVAL;
}
memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
sizeof(struct htt_ppdu_stats_common));
break;
case HTT_PPDU_STATS_TAG_USR_RATE:
if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
len, tag);
return -EINVAL;
}
peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
peer_id);
if (cur_user < 0)
return -EINVAL;
user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
user_stats->peer_id = peer_id;
user_stats->is_valid_peer_id = true;
memcpy((void *)&user_stats->rate, ptr,
sizeof(struct htt_ppdu_stats_user_rate));
user_stats->tlv_flags |= BIT(tag);
break;
case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
len, tag);
return -EINVAL;
}
peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
peer_id);
if (cur_user < 0)
return -EINVAL;
user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
user_stats->peer_id = peer_id;
user_stats->is_valid_peer_id = true;
memcpy((void *)&user_stats->cmpltn_cmn, ptr,
sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
user_stats->tlv_flags |= BIT(tag);
break;
case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
if (len <
sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
len, tag);
return -EINVAL;
}
peer_id =
((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
peer_id);
if (cur_user < 0)
return -EINVAL;
user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
user_stats->peer_id = peer_id;
user_stats->is_valid_peer_id = true;
memcpy((void *)&user_stats->ack_ba, ptr,
sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
user_stats->tlv_flags |= BIT(tag);
break;
}
return 0;
}
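/* Walk a buffer of HTT TLVs, validating each header and length before
 * handing the payload to the supplied iterator callback.
 */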
int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
const void *ptr, void *data),
void *data)
{
const struct htt_tlv *tlv;
const void *begin = ptr;
u16 tlv_tag, tlv_len;
int ret = -EINVAL;
while (len > 0) {
if (len < sizeof(*tlv)) {
ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
ptr - begin, len, sizeof(*tlv));
return -EINVAL;
}
tlv = (struct htt_tlv *)ptr;
tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
ptr += sizeof(*tlv);
len -= sizeof(*tlv);
if (tlv_len > len) {
ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
tlv_tag, ptr - begin, len, tlv_len);
return -EINVAL;
}
ret = iter(ab, tlv_tag, tlv_len, ptr, data);
if (ret == -ENOMEM)
return ret;
ptr += tlv_len;
len -= tlv_len;
}
return 0;
}
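/* Translate the HTT PPDU user stats of one user into the per-peer tx rate
 * and, with extended tx stats enabled, into the debugfs tx statistics.
 */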
static void
ath11k_update_per_peer_tx_stats(struct ath11k *ar,
struct htt_ppdu_stats *ppdu_stats, u8 user)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_peer *peer;
struct ieee80211_sta *sta;
struct ath11k_sta *arsta;
struct htt_ppdu_stats_user_rate *user_rate;
struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
struct htt_ppdu_stats_common *common = &ppdu_stats->common;
int ret;
u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
u32 succ_bytes = 0;
u16 rate = 0, succ_pkts = 0;
u32 tx_duration = 0;
u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
bool is_ampdu = false;
if (!usr_stats)
return;
if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
return;
if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
is_ampdu =
HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
if (usr_stats->tlv_flags &
BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
succ_bytes = usr_stats->ack_ba.success_bytes;
succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
usr_stats->ack_ba.info);
tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
usr_stats->ack_ba.info);
}
if (common->fes_duration_us)
tx_duration = common->fes_duration_us;
user_rate = &usr_stats->rate;
flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
/* Note: If the host configured fixed rates, or in some other special
 * cases, broadcast/management frames are sent at different rates.
 * Should firmware rate control be skipped for those?
 */
if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
ath11k_warn(ab, "invalid HE mcs %d in peer stats\n", mcs);
return;
}
if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
ath11k_warn(ab, "invalid VHT mcs %d in peer stats\n", mcs);
return;
}
if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
ath11k_warn(ab, "invalid HT mcs %d nss %d in peer stats\n",
mcs, nss);
return;
}
if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
flags,
&rate_idx,
&rate);
if (ret < 0)
return;
}
rcu_read_lock();
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);
if (!peer || !peer->sta) {
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
return;
}
sta = peer->sta;
arsta = (struct ath11k_sta *)sta->drv_priv;
memset(&arsta->txrate, 0, sizeof(arsta->txrate));
switch (flags) {
case WMI_RATE_PREAMBLE_OFDM:
arsta->txrate.legacy = rate;
break;
case WMI_RATE_PREAMBLE_CCK:
arsta->txrate.legacy = rate;
break;
case WMI_RATE_PREAMBLE_HT:
arsta->txrate.mcs = mcs + 8 * (nss - 1);
arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
if (sgi)
arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
break;
case WMI_RATE_PREAMBLE_VHT:
arsta->txrate.mcs = mcs;
arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
if (sgi)
arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
break;
case WMI_RATE_PREAMBLE_HE:
arsta->txrate.mcs = mcs;
arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
arsta->txrate.he_dcm = dcm;
arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
arsta->txrate.he_ru_alloc = ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc
((user_rate->ru_end -
user_rate->ru_start) + 1);
break;
}
arsta->txrate.nss = nss;
arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
arsta->tx_duration += tx_duration;
memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
/* PPDU stats reported for mgmt packets don't have valid tx bytes.
 * So skip the peer stats update for mgmt packets.
 */
if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
memset(peer_stats, 0, sizeof(*peer_stats));
peer_stats->succ_pkts = succ_pkts;
peer_stats->succ_bytes = succ_bytes;
peer_stats->is_ampdu = is_ampdu;
peer_stats->duration = tx_duration;
peer_stats->ba_fails =
HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
}
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
}
static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
struct htt_ppdu_stats *ppdu_stats)
{
u8 user;
for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}
static
struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
u32 ppdu_id)
{
struct htt_ppdu_stats_info *ppdu_info;
lockdep_assert_held(&ar->data_lock);
if (!list_empty(&ar->ppdu_stats_info)) {
list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
if (ppdu_info->ppdu_id == ppdu_id)
return ppdu_info;
}
if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
ppdu_info = list_first_entry(&ar->ppdu_stats_info,
typeof(*ppdu_info), list);
list_del(&ppdu_info->list);
ar->ppdu_stat_list_depth--;
ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
kfree(ppdu_info);
}
}
ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
if (!ppdu_info)
return NULL;
list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
ar->ppdu_stat_list_depth++;
return ppdu_info;
}
static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
struct sk_buff *skb)
{
struct ath11k_htt_ppdu_stats_msg *msg;
struct htt_ppdu_stats_info *ppdu_info;
struct ath11k *ar;
int ret;
u8 pdev_id;
u32 ppdu_id, len;
msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
ppdu_id = msg->ppdu_id;
rcu_read_lock();
ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
if (!ar) {
ret = -EINVAL;
goto out;
}
if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
spin_lock_bh(&ar->data_lock);
ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
if (!ppdu_info) {
ret = -EINVAL;
goto out_unlock_data;
}
ppdu_info->ppdu_id = ppdu_id;
ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
ath11k_htt_tlv_ppdu_stats_parse,
(void *)ppdu_info);
if (ret) {
ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
goto out_unlock_data;
}
out_unlock_data:
spin_unlock_bh(&ar->data_lock);
out:
rcu_read_unlock();
return ret;
}
static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
{
struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
struct ath11k *ar;
u8 pdev_id;
pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
if (!ar) {
ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
return;
}
trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
ar->ab->pktlog_defs_checksum);
}
static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
struct sk_buff *skb)
{
u32 *data = (u32 *)skb->data;
u8 pdev_id, ring_type, ring_id, pdev_idx;
u16 hp, tp;
u32 backpressure_time;
struct ath11k_bp_stats *bp_stats;
pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
++data;
hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
++data;
backpressure_time = *data;
ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n",
pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
return;
bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
} else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
pdev_idx = DP_HW2SW_MACID(pdev_id);
if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
return;
bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
} else {
ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
ring_type);
return;
}
spin_lock_bh(&ab->base_lock);
bp_stats->hp = hp;
bp_stats->tp = tp;
bp_stats->count++;
bp_stats->jiffies = jiffies;
spin_unlock_bh(&ab->base_lock);
}
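/* Dispatch HTT target-to-host messages: version handshake, peer map/unmap
 * events, PPDU stats, pktlog and ring backpressure indications.
 */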
void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
struct sk_buff *skb)
{
struct ath11k_dp *dp = &ab->dp;
struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
u16 peer_id;
u8 vdev_id;
u8 mac_addr[ETH_ALEN];
u16 peer_mac_h16;
u16 ast_hash;
u16 hw_peer_id;
ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);
switch (type) {
case HTT_T2H_MSG_TYPE_VERSION_CONF:
dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
resp->version_msg.version);
dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
resp->version_msg.version);
complete(&dp->htt_tgt_version_received);
break;
case HTT_T2H_MSG_TYPE_PEER_MAP:
vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
resp->peer_map_ev.info);
peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
resp->peer_map_ev.info);
peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
resp->peer_map_ev.info1);
ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
peer_mac_h16, mac_addr);
ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
break;
case HTT_T2H_MSG_TYPE_PEER_MAP2:
vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
resp->peer_map_ev.info);
peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
resp->peer_map_ev.info);
peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
resp->peer_map_ev.info1);
ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
peer_mac_h16, mac_addr);
ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
resp->peer_map_ev.info2);
hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
resp->peer_map_ev.info1);
ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
hw_peer_id);
break;
case HTT_T2H_MSG_TYPE_PEER_UNMAP:
case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
resp->peer_unmap_ev.info);
ath11k_peer_unmap_event(ab, peer_id);
break;
case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
ath11k_htt_pull_ppdu_stats(ab, skb);
break;
case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
ath11k_debugfs_htt_ext_stats_handler(ab, skb);
break;
case HTT_T2H_MSG_TYPE_PKTLOG:
ath11k_htt_pktlog(ab, skb);
break;
case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
ath11k_htt_backpressure_event_handler(ab, skb);
break;
default:
ath11k_warn(ab, "htt event %d not handled\n", type);
break;
}
dev_kfree_skb_any(skb);
}
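/* Stitch an MSDU that spans several rx buffers back into the first skb,
 * expanding its tailroom when needed and consuming the continuation buffers.
 */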
static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
struct sk_buff_head *msdu_list,
struct sk_buff *first, struct sk_buff *last,
u8 l3pad_bytes, int msdu_len)
{
struct ath11k_base *ab = ar->ab;
struct sk_buff *skb;
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
int buf_first_hdr_len, buf_first_len;
struct hal_rx_desc *ldesc;
int space_extra, rem_len, buf_len;
u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
/* As the msdu is spread across multiple rx buffers,
* find the offset to the start of msdu for computing
* the length of the msdu in the first buffer.
*/
buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
skb_put(first, buf_first_hdr_len + msdu_len);
skb_pull(first, buf_first_hdr_len);
return 0;
}
ldesc = (struct hal_rx_desc *)last->data;
rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc);
rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc);
/* The MSDU spans multiple buffers because its length exceeds
 * DP_RX_BUFFER_SIZE - hal_rx_desc_sz, so assume the data in the
 * first buffer fills it up to DP_RX_BUFFER_SIZE.
 */
skb_put(first, DP_RX_BUFFER_SIZE);
skb_pull(first, buf_first_hdr_len);
/* When an MSDU is spread over multiple buffers, the attention,
 * MSDU_END and MPDU_END TLVs are valid only in the last buffer.
 * Copy those TLVs over.
 */
ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
if (space_extra > 0 &&
(pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
/* Free up all buffers of the MSDU */
while ((skb = __skb_dequeue(msdu_list)) != NULL) {
rxcb = ATH11K_SKB_RXCB(skb);
if (!rxcb->is_continuation) {
dev_kfree_skb_any(skb);
break;
}
dev_kfree_skb_any(skb);
}
return -ENOMEM;
}
rem_len = msdu_len - buf_first_len;
while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
rxcb = ATH11K_SKB_RXCB(skb);
if (rxcb->is_continuation)
buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
else
buf_len = rem_len;
if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
WARN_ON_ONCE(1);
dev_kfree_skb_any(skb);
return -EINVAL;
}
skb_put(skb, buf_len + hal_rx_desc_sz);
skb_pull(skb, hal_rx_desc_sz);
skb_copy_from_linear_data(skb, skb_put(first, buf_len),
buf_len);
dev_kfree_skb_any(skb);
rem_len -= buf_len;
if (!rxcb->is_continuation)
break;
}
return 0;
}
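/* Find the last rx buffer of an MSDU, i.e. the first entry in the list
 * whose continuation bit is clear; only that buffer carries valid end TLVs.
 */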
static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
struct sk_buff *first)
{
struct sk_buff *skb;
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
if (!rxcb->is_continuation)
return first;
skb_queue_walk(msdu_list, skb) {
rxcb = ATH11K_SKB_RXCB(skb);
if (!rxcb->is_continuation)
return skb;
}
return NULL;
}
static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
{
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
struct rx_attention *rx_attention;
bool ip_csum_fail, l4_csum_fail;
rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc);
ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention);
l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention);
msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
}
static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
enum hal_encrypt_type enctype)
{
switch (enctype) {
case HAL_ENCRYPT_TYPE_OPEN:
case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
case HAL_ENCRYPT_TYPE_TKIP_MIC:
return 0;
case HAL_ENCRYPT_TYPE_CCMP_128:
return IEEE80211_CCMP_MIC_LEN;
case HAL_ENCRYPT_TYPE_CCMP_256:
return IEEE80211_CCMP_256_MIC_LEN;
case HAL_ENCRYPT_TYPE_GCMP_128:
case HAL_ENCRYPT_TYPE_AES_GCMP_256:
return IEEE80211_GCMP_MIC_LEN;
case HAL_ENCRYPT_TYPE_WEP_40:
case HAL_ENCRYPT_TYPE_WEP_104:
case HAL_ENCRYPT_TYPE_WEP_128:
case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
case HAL_ENCRYPT_TYPE_WAPI:
break;
}
ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
return 0;
}
static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
enum hal_encrypt_type enctype)
{
switch (enctype) {
case HAL_ENCRYPT_TYPE_OPEN:
return 0;
case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
case HAL_ENCRYPT_TYPE_TKIP_MIC:
return IEEE80211_TKIP_IV_LEN;
case HAL_ENCRYPT_TYPE_CCMP_128:
return IEEE80211_CCMP_HDR_LEN;
case HAL_ENCRYPT_TYPE_CCMP_256:
return IEEE80211_CCMP_256_HDR_LEN;
case HAL_ENCRYPT_TYPE_GCMP_128:
case HAL_ENCRYPT_TYPE_AES_GCMP_256:
return IEEE80211_GCMP_HDR_LEN;
case HAL_ENCRYPT_TYPE_WEP_40:
case HAL_ENCRYPT_TYPE_WEP_104:
case HAL_ENCRYPT_TYPE_WEP_128:
case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
case HAL_ENCRYPT_TYPE_WAPI:
break;
}
ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
return 0;
}
static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
enum hal_encrypt_type enctype)
{
switch (enctype) {
case HAL_ENCRYPT_TYPE_OPEN:
case HAL_ENCRYPT_TYPE_CCMP_128:
case HAL_ENCRYPT_TYPE_CCMP_256:
case HAL_ENCRYPT_TYPE_GCMP_128:
case HAL_ENCRYPT_TYPE_AES_GCMP_256:
return 0;
case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
case HAL_ENCRYPT_TYPE_TKIP_MIC:
return IEEE80211_TKIP_ICV_LEN;
case HAL_ENCRYPT_TYPE_WEP_40:
case HAL_ENCRYPT_TYPE_WEP_104:
case HAL_ENCRYPT_TYPE_WEP_128:
case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
case HAL_ENCRYPT_TYPE_WAPI:
break;
}
ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
return 0;
}
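/* Convert a native wifi decapped MSDU back into a regular 802.11 frame,
 * rebuilding the QoS control field and crypto params that hw stripped.
 */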
static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
struct sk_buff *msdu,
u8 *first_hdr,
enum hal_encrypt_type enctype,
struct ieee80211_rx_status *status)
{
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
struct ieee80211_hdr *hdr;
size_t hdr_len;
u8 da[ETH_ALEN];
u8 sa[ETH_ALEN];
u16 qos_ctl = 0;
u8 *qos;
/* copy SA & DA and pull decapped header */
hdr = (struct ieee80211_hdr *)msdu->data;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
ether_addr_copy(da, ieee80211_get_DA(hdr));
ether_addr_copy(sa, ieee80211_get_SA(hdr));
skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));
if (rxcb->is_first_msdu) {
/* original 802.11 header is valid for the first msdu
* hence we can reuse the same header
*/
hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
/* Each A-MSDU subframe will be reported as a separate MSDU,
* so strip the A-MSDU bit from QoS Ctl.
*/
if (ieee80211_is_data_qos(hdr->frame_control)) {
qos = ieee80211_get_qos_ctl(hdr);
qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
}
} else {
/* Rebuild qos header if this is a middle/last msdu */
hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
/* Reset the order bit as the HT_Control header is stripped */
hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
qos_ctl = rxcb->tid;
if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc))
qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
/* TODO Add other QoS ctl fields when required */
/* copy decap header before overwriting for reuse below */
memcpy(decap_hdr, (uint8_t *)hdr, hdr_len);
}
if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
memcpy(skb_push(msdu,
ath11k_dp_rx_crypto_param_len(ar, enctype)),
(void *)hdr + hdr_len,
ath11k_dp_rx_crypto_param_len(ar, enctype));
}
if (!rxcb->is_first_msdu) {
memcpy(skb_push(msdu,
IEEE80211_QOS_CTL_LEN), &qos_ctl,
IEEE80211_QOS_CTL_LEN);
memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
return;
}
memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
/* original 802.11 header has a different DA and in
* case of 4addr it may also have different SA
*/
hdr = (struct ieee80211_hdr *)msdu->data;
ether_addr_copy(ieee80211_get_DA(hdr), da);
ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
enum hal_encrypt_type enctype,
struct ieee80211_rx_status *status,
bool decrypted)
{
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
struct ieee80211_hdr *hdr;
size_t hdr_len;
size_t crypto_len;
if (!rxcb->is_first_msdu ||
!(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
WARN_ON_ONCE(1);
return;
}
skb_trim(msdu, msdu->len - FCS_LEN);
if (!decrypted)
return;
hdr = (void *)msdu->data;
/* Tail */
if (status->flag & RX_FLAG_IV_STRIPPED) {
skb_trim(msdu, msdu->len -
ath11k_dp_rx_crypto_mic_len(ar, enctype));
skb_trim(msdu, msdu->len -
ath11k_dp_rx_crypto_icv_len(ar, enctype));
} else {
/* MIC */
if (status->flag & RX_FLAG_MIC_STRIPPED)
skb_trim(msdu, msdu->len -
ath11k_dp_rx_crypto_mic_len(ar, enctype));
/* ICV */
if (status->flag & RX_FLAG_ICV_STRIPPED)
skb_trim(msdu, msdu->len -
ath11k_dp_rx_crypto_icv_len(ar, enctype));
}
/* MMIC */
if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
!ieee80211_has_morefrags(hdr->frame_control) &&
enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
/* Head */
if (status->flag & RX_FLAG_IV_STRIPPED) {
hdr_len = ieee80211_hdrlen(hdr->frame_control);
crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
memmove((void *)msdu->data + crypto_len,
(void *)msdu->data, hdr_len);
skb_pull(msdu, crypto_len);
}
}
static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
struct sk_buff *msdu,
enum hal_encrypt_type enctype)
{
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
struct ieee80211_hdr *hdr;
size_t hdr_len, crypto_len;
void *rfc1042;
bool is_amsdu;
is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc);
rfc1042 = hdr;
if (rxcb->is_first_msdu) {
hdr_len = ieee80211_hdrlen(hdr->frame_control);
crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
rfc1042 += hdr_len + crypto_len;
}
if (is_amsdu)
rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);
return rfc1042;
}
static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
struct sk_buff *msdu,
u8 *first_hdr,
enum hal_encrypt_type enctype,
struct ieee80211_rx_status *status)
{
struct ieee80211_hdr *hdr;
struct ethhdr *eth;
size_t hdr_len;
u8 da[ETH_ALEN];
u8 sa[ETH_ALEN];
void *rfc1042;
rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
if (WARN_ON_ONCE(!rfc1042))
return;
/* pull decapped header and copy SA & DA */
eth = (struct ethhdr *)msdu->data;
ether_addr_copy(da, eth->h_dest);
ether_addr_copy(sa, eth->h_source);
skb_pull(msdu, sizeof(struct ethhdr));
/* push rfc1042/llc/snap */
memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
sizeof(struct ath11k_dp_rfc1042_hdr));
/* push original 802.11 header */
hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
memcpy(skb_push(msdu,
ath11k_dp_rx_crypto_param_len(ar, enctype)),
(void *)hdr + hdr_len,
ath11k_dp_rx_crypto_param_len(ar, enctype));
}
memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
/* original 802.11 header has a different DA and in
* case of 4addr it may also have different SA
*/
hdr = (struct ieee80211_hdr *)msdu->data;
ether_addr_copy(ieee80211_get_DA(hdr), da);
ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
struct hal_rx_desc *rx_desc,
enum hal_encrypt_type enctype,
struct ieee80211_rx_status *status,
bool decrypted)
{
u8 *first_hdr;
u8 decap;
struct ethhdr *ehdr;
first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc);
switch (decap) {
case DP_RX_DECAP_TYPE_NATIVE_WIFI:
ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
enctype, status);
break;
case DP_RX_DECAP_TYPE_RAW:
ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
decrypted);
break;
case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
ehdr = (struct ethhdr *)msdu->data;
/* mac80211 allows fast path only for authorized STA */
if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
ATH11K_SKB_RXCB(msdu)->is_eapol = true;
ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
enctype, status);
break;
}
/* PN for mcast packets will be validated in mac80211;
* remove eth header and add 802.11 header.
*/
if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted)
ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
enctype, status);
break;
case DP_RX_DECAP_TYPE_8023:
/* TODO: Handle undecap for these formats */
break;
}
}
static struct ath11k_peer *
ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu)
{
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
struct hal_rx_desc *rx_desc = rxcb->rx_desc;
struct ath11k_peer *peer = NULL;
lockdep_assert_held(&ab->base_lock);
if (rxcb->peer_id)
peer = ath11k_peer_find_by_id(ab, rxcb->peer_id);
if (peer)
return peer;
if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
return NULL;
peer = ath11k_peer_find_by_addr(ab,
ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc));
return peer;
}
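/* Per-MPDU rx handling: derive the cipher from the peer entry, translate hw
 * error/decryption status into mac80211 rx flags and undecap the frame.
 */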
static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
struct sk_buff *msdu,
struct hal_rx_desc *rx_desc,
struct ieee80211_rx_status *rx_status)
{
bool fill_crypto_hdr;
enum hal_encrypt_type enctype;
bool is_decrypted = false;
struct ath11k_skb_rxcb *rxcb;
struct ieee80211_hdr *hdr;
struct ath11k_peer *peer;
struct rx_attention *rx_attention;
u32 err_bitmap;
/* PN for multicast packets will be checked in mac80211 */
rxcb = ATH11K_SKB_RXCB(msdu);
fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
rxcb->is_mcbc = fill_crypto_hdr;
if (rxcb->is_mcbc) {
rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
}
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
if (peer) {
if (rxcb->is_mcbc)
enctype = peer->sec_type_grp;
else
enctype = peer->sec_type;
} else {
enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
}
spin_unlock_bh(&ar->ab->base_lock);
rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
/* Clear per-MPDU flags while leaving per-PPDU flags intact */
rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
RX_FLAG_MMIC_ERROR |
RX_FLAG_DECRYPTED |
RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED);
if (err_bitmap & DP_RX_MPDU_ERR_FCS)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
rx_status->flag |= RX_FLAG_MMIC_ERROR;
if (is_decrypted) {
rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
if (fill_crypto_hdr)
rx_status->flag |= RX_FLAG_MIC_STRIPPED |
RX_FLAG_ICV_STRIPPED;
else
rx_status->flag |= RX_FLAG_IV_STRIPPED |
RX_FLAG_PN_VALIDATED;
}
ath11k_dp_rx_h_csum_offload(ar, msdu);
ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
enctype, rx_status, is_decrypted);
if (!is_decrypted || fill_crypto_hdr)
return;
if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) !=
DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
hdr = (void *)msdu->data;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
}
}
static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
struct ieee80211_rx_status *rx_status)
{
struct ieee80211_supported_band *sband;
enum rx_msdu_start_pkt_type pkt_type;
u8 bw;
u8 rate_mcs, nss;
u8 sgi;
bool is_cck, is_ldpc;
pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc);
nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc);
sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc);
switch (pkt_type) {
case RX_MSDU_START_PKT_TYPE_11A:
case RX_MSDU_START_PKT_TYPE_11B:
is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
sband = &ar->mac.sbands[rx_status->band];
rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
is_cck);
break;
case RX_MSDU_START_PKT_TYPE_11N:
rx_status->encoding = RX_ENC_HT;
if (rate_mcs > ATH11K_HT_MCS_MAX) {
ath11k_warn(ar->ab,
"received invalid mcs %d in HT mode\n",
rate_mcs);
break;
}
rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
if (sgi)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
break;
case RX_MSDU_START_PKT_TYPE_11AC:
rx_status->encoding = RX_ENC_VHT;
rx_status->rate_idx = rate_mcs;
if (rate_mcs > ATH11K_VHT_MCS_MAX) {
ath11k_warn(ar->ab,
"received invalid mcs %d in VHT mode\n",
rate_mcs);
break;
}
rx_status->nss = nss;
if (sgi)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc);
if (is_ldpc)
rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
break;
case RX_MSDU_START_PKT_TYPE_11AX:
rx_status->rate_idx = rate_mcs;
if (rate_mcs > ATH11K_HE_MCS_MAX) {
ath11k_warn(ar->ab,
"received invalid mcs %d in HE mode\n",
rate_mcs);
break;
}
rx_status->encoding = RX_ENC_HE;
rx_status->nss = nss;
rx_status->he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
break;
}
}
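/* Fill the per-PPDU rx status: derive band and frequency from the rx
 * descriptor (falling back to the current rx channel) and decode the rate.
 */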
static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
struct ieee80211_rx_status *rx_status)
{
u8 channel_num;
u32 center_freq, meta_data;
struct ieee80211_channel *channel;
rx_status->freq = 0;
rx_status->rate_idx = 0;
rx_status->nss = 0;
rx_status->encoding = RX_ENC_LEGACY;
rx_status->bw = RATE_INFO_BW_20;
rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc);
channel_num = meta_data;
center_freq = meta_data >> 16;
if (center_freq >= ATH11K_MIN_6G_FREQ &&
center_freq <= ATH11K_MAX_6G_FREQ) {
rx_status->band = NL80211_BAND_6GHZ;
rx_status->freq = center_freq;
} else if (channel_num >= 1 && channel_num <= 14) {
rx_status->band = NL80211_BAND_2GHZ;
} else if (channel_num >= 36 && channel_num <= 177) {
rx_status->band = NL80211_BAND_5GHZ;
} else {
spin_lock_bh(&ar->data_lock);
channel = ar->rx_channel;
if (channel) {
rx_status->band = channel->band;
channel_num =
ieee80211_frequency_to_channel(channel->center_freq);
}
spin_unlock_bh(&ar->data_lock);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
rx_desc, sizeof(struct hal_rx_desc));
}
if (rx_status->band != NL80211_BAND_6GHZ)
rx_status->freq = ieee80211_channel_to_frequency(channel_num,
rx_status->band);
ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
}
static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
struct sk_buff *msdu,
struct ieee80211_rx_status *status)
{
static const struct ieee80211_radiotap_he known = {
.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
};
struct ieee80211_rx_status *rx_status;
struct ieee80211_radiotap_he *he = NULL;
struct ieee80211_sta *pubsta = NULL;
struct ath11k_peer *peer;
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
u8 decap = DP_RX_DECAP_TYPE_RAW;
bool is_mcbc = rxcb->is_mcbc;
bool is_eapol = rxcb->is_eapol;
if (status->encoding == RX_ENC_HE &&
!(status->flag & RX_FLAG_RADIOTAP_HE) &&
!(status->flag & RX_FLAG_SKIP_MONITOR)) {
he = skb_push(msdu, sizeof(known));
memcpy(he, &known, sizeof(known));
status->flag |= RX_FLAG_RADIOTAP_HE;
}
if (!(status->flag & RX_FLAG_ONLY_MONITOR))
decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc);
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
if (peer && peer->sta)
pubsta = peer->sta;
spin_unlock_bh(&ar->ab->base_lock);
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
peer ? peer->addr : NULL,
rxcb->tid,
is_mcbc ? "mcast" : "ucast",
rxcb->seq_no,
(status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
(status->encoding == RX_ENC_HT) ? "ht" : "",
(status->encoding == RX_ENC_VHT) ? "vht" : "",
(status->encoding == RX_ENC_HE) ? "he" : "",
(status->bw == RATE_INFO_BW_40) ? "40" : "",
(status->bw == RATE_INFO_BW_80) ? "80" : "",
(status->bw == RATE_INFO_BW_160) ? "160" : "",
status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
status->rate_idx,
status->nss,
status->freq,
status->band, status->flag,
!!(status->flag & RX_FLAG_FAILED_FCS_CRC),
!!(status->flag & RX_FLAG_MMIC_ERROR),
!!(status->flag & RX_FLAG_AMSDU_MORE));
ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
msdu->data, msdu->len);
rx_status = IEEE80211_SKB_RXCB(msdu);
*rx_status = *status;
/* TODO: trace rx packet */
/* The PN for multicast packets is not validated in HW,
 * so skip the 802.3 rx path.
 * Also, fast_rx expects the STA to be authorized, hence
 * EAPOL packets are sent via the slow path.
 */
if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
!(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
rx_status->flag |= RX_FLAG_8023;
ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
}
static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
struct sk_buff *msdu,
struct sk_buff_head *msdu_list,
struct ieee80211_rx_status *rx_status)
{
struct ath11k_base *ab = ar->ab;
struct hal_rx_desc *rx_desc, *lrx_desc;
struct rx_attention *rx_attention;
struct ath11k_skb_rxcb *rxcb;
struct sk_buff *last_buf;
u8 l3_pad_bytes;
u8 *hdr_status;
u16 msdu_len;
int ret;
u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
if (!last_buf) {
ath11k_warn(ab,
"No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
ret = -EIO;
goto free_out;
}
rx_desc = (struct hal_rx_desc *)msdu->data;
if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) {
ath11k_warn(ar->ab, "msdu len not valid\n");
ret = -EIO;
goto free_out;
}
lrx_desc = (struct hal_rx_desc *)last_buf->data;
rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc);
if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
ath11k_warn(ab, "msdu_done bit in attention is not set\n");
ret = -EIO;
goto free_out;
}
rxcb = ATH11K_SKB_RXCB(msdu);
rxcb->rx_desc = rx_desc;
msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc);
l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc);
if (rxcb->is_frag) {
skb_pull(msdu, hal_rx_desc_sz);
} else if (!rxcb->is_continuation) {
if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc);
ret = -EINVAL;
ath11k_warn(ab, "invalid msdu len %u\n", msdu_len);
ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
sizeof(struct ieee80211_hdr));
ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
sizeof(struct hal_rx_desc));
goto free_out;
}
skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
} else {
ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
msdu, last_buf,
l3_pad_bytes, msdu_len);
if (ret) {
ath11k_warn(ab,
"failed to coalesce msdu rx buffer%d\n", ret);
goto free_out;
}
}
ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
return 0;
free_out:
return ret;
}
static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
struct napi_struct *napi,
struct sk_buff_head *msdu_list,
int mac_id)
{
struct sk_buff *msdu;
struct ath11k *ar;
struct ieee80211_rx_status rx_status = {0};
int ret;
if (skb_queue_empty(msdu_list))
return;
if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) {
__skb_queue_purge(msdu_list);
return;
}
ar = ab->pdevs[mac_id].ar;
if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) {
__skb_queue_purge(msdu_list);
return;
}
while ((msdu = __skb_dequeue(msdu_list))) {
ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
if (unlikely(ret)) {
ath11k_dbg(ab, ATH11K_DBG_DATA,
"Unable to process msdu %d", ret);
dev_kfree_skb_any(msdu);
continue;
}
ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
}
}
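/* NAPI poll handler for a REO destination ring: reap up to budget MSDUs,
 * sort them per pdev, deliver them to mac80211 and replenish the rx ring.
 */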
int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
struct napi_struct *napi, int budget)
{
struct ath11k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring;
int num_buffs_reaped[MAX_RADIOS] = {0};
struct sk_buff_head msdu_list[MAX_RADIOS];
struct ath11k_skb_rxcb *rxcb;
int total_msdu_reaped = 0;
struct hal_srng *srng;
struct sk_buff *msdu;
bool done = false;
int buf_id, mac_id;
struct ath11k *ar;
struct hal_reo_dest_ring *desc;
enum hal_reo_dest_ring_push_reason push_reason;
u32 cookie;
int i;
for (i = 0; i < MAX_RADIOS; i++)
__skb_queue_head_init(&msdu_list[i]);
srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
spin_lock_bh(&srng->lock);
try_again:
ath11k_hal_srng_access_begin(ab, srng);
while (likely(desc =
(struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
srng))) {
cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
desc->buf_addr_info.info1);
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
cookie);
mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
if (unlikely(buf_id == 0))
continue;
ar = ab->pdevs[mac_id].ar;
rx_ring = &ar->dp.rx_refill_buf_ring;
spin_lock_bh(&rx_ring->idr_lock);
msdu = idr_find(&rx_ring->bufs_idr, buf_id);
if (unlikely(!msdu)) {
ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
continue;
}
idr_remove(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
rxcb = ATH11K_SKB_RXCB(msdu);
dma_unmap_single(ab->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
num_buffs_reaped[mac_id]++;
push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
desc->info0);
if (unlikely(push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
dev_kfree_skb_any(msdu);
ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
continue;
}
rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
desc->rx_mpdu_info.meta_data);
rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
desc->rx_mpdu_info.info0);
rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
desc->info0);
rxcb->mac_id = mac_id;
__skb_queue_tail(&msdu_list[mac_id], msdu);
if (rxcb->is_continuation) {
done = false;
} else {
total_msdu_reaped++;
done = true;
}
if (total_msdu_reaped >= budget)
break;
}
/* Hw might have updated the head pointer after we cached it.
* In this case, even though there are entries in the ring we'll
* get rx_desc NULL. Give the read another try with updated cached
* head pointer so that we can reap complete MPDU in the current
* rx processing.
*/
if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) {
ath11k_hal_srng_access_end(ab, srng);
goto try_again;
}
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
if (unlikely(!total_msdu_reaped))
goto exit;
for (i = 0; i < ab->num_radios; i++) {
if (!num_buffs_reaped[i])
continue;
ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i);
ar = ab->pdevs[i].ar;
rx_ring = &ar->dp.rx_refill_buf_ring;
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
ab->hw_params.hal_params->rx_buf_rbm);
}
exit:
return total_msdu_reaped;
}
static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
u32 num_msdu;
int i;
if (!rx_stats)
return;
arsta->rssi_comb = ppdu_info->rssi_comb;
ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
rx_stats->num_msdu += num_msdu;
rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
ppdu_info->tcp_ack_msdu_count;
rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
ppdu_info->nss = 1;
ppdu_info->mcs = HAL_RX_MAX_MCS;
ppdu_info->tid = IEEE80211_NUM_TIDS;
}
if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;
if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;
if (ppdu_info->gi < HAL_RX_GI_MAX)
rx_stats->gi_count[ppdu_info->gi] += num_msdu;
if (ppdu_info->bw < HAL_RX_BW_MAX)
rx_stats->bw_count[ppdu_info->bw] += num_msdu;
if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
rx_stats->tid_count[ppdu_info->tid] += num_msdu;
if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
if (ppdu_info->is_stbc)
rx_stats->stbc_count += num_msdu;
if (ppdu_info->beamformed)
rx_stats->beamformed_count += num_msdu;
if (ppdu_info->num_mpdu_fcs_ok > 1)
rx_stats->ampdu_msdu_count += num_msdu;
else
rx_stats->non_ampdu_msdu_count += num_msdu;
rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
rx_stats->dcm_count += ppdu_info->dcm;
rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
ARRAY_SIZE(ppdu_info->rssi_chain_pri20));
for (i = 0; i < ARRAY_SIZE(arsta->chain_signal); i++)
arsta->chain_signal[i] = ppdu_info->rssi_chain_pri20[i];
rx_stats->rx_duration += ppdu_info->rx_duration;
arsta->rx_duration = rx_stats->rx_duration;
}
static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
struct dp_rxdma_ring *rx_ring,
int *buf_id)
{
struct sk_buff *skb;
dma_addr_t paddr;
skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
DP_RX_BUFFER_ALIGN_SIZE);
if (!skb)
goto fail_alloc_skb;
if (!IS_ALIGNED((unsigned long)skb->data,
DP_RX_BUFFER_ALIGN_SIZE)) {
skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
skb->data);
}
paddr = dma_map_single(ab->dev, skb->data,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(ab->dev, paddr)))
goto fail_free_skb;
spin_lock_bh(&rx_ring->idr_lock);
*buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
rx_ring->bufs_max, GFP_ATOMIC);
spin_unlock_bh(&rx_ring->idr_lock);
if (*buf_id < 0)
goto fail_dma_unmap;
ATH11K_SKB_RXCB(skb)->paddr = paddr;
return skb;
fail_dma_unmap:
dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
fail_free_skb:
dev_kfree_skb_any(skb);
fail_alloc_skb:
return NULL;
}
int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
struct dp_rxdma_ring *rx_ring,
int req_entries,
enum hal_rx_buf_return_buf_manager mgr)
{
struct hal_srng *srng;
u32 *desc;
struct sk_buff *skb;
int num_free;
int num_remain;
int buf_id;
u32 cookie;
dma_addr_t paddr;
req_entries = min(req_entries, rx_ring->bufs_max);
srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
spin_lock_bh(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
req_entries = min(num_free, req_entries);
num_remain = req_entries;
while (num_remain > 0) {
skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
&buf_id);
if (!skb)
break;
paddr = ATH11K_SKB_RXCB(skb)->paddr;
desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
if (!desc)
goto fail_desc_get;
cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
num_remain--;
ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
}
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return req_entries - num_remain;
fail_desc_get:
spin_lock_bh(&rx_ring->idr_lock);
idr_remove(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return req_entries - num_remain;
}
#define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 32535
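/* Compare the PPDU id in a status buffer against the one seen on the
 * destination ring to decide whether the status ring leads or lags,
 * accounting for PPDU id wrap-around.
 */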
static void
ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon,
struct hal_tlv_hdr *tlv)
{
struct hal_rx_ppdu_start *ppdu_start;
u16 ppdu_id_diff, ppdu_id, tlv_len;
u8 *ptr;
/* PPDU id is part of second tlv, move ptr to second tlv */
tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
ptr = (u8 *)tlv;
ptr += sizeof(*tlv) + tlv_len;
tlv = (struct hal_tlv_hdr *)ptr;
if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_PPDU_START)
return;
ptr += sizeof(*tlv);
ppdu_start = (struct hal_rx_ppdu_start *)ptr;
ppdu_id = FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
__le32_to_cpu(ppdu_start->info0));
if (pmon->sw_mon_entries.ppdu_id < ppdu_id) {
pmon->buf_state = DP_MON_STATUS_LEAD;
ppdu_id_diff = ppdu_id - pmon->sw_mon_entries.ppdu_id;
if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
pmon->buf_state = DP_MON_STATUS_LAG;
} else if (pmon->sw_mon_entries.ppdu_id > ppdu_id) {
pmon->buf_state = DP_MON_STATUS_LAG;
ppdu_id_diff = pmon->sw_mon_entries.ppdu_id - ppdu_id;
if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
pmon->buf_state = DP_MON_STATUS_LEAD;
}
}
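/* Reap completed buffers from the monitor status ring into skb_list and
 * immediately replace each reaped entry with a freshly allocated buffer.
 * Buffers whose DONE TLV is not yet set are left on the ring for the
 * next pass.
 */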
static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
int *budget, struct sk_buff_head *skb_list)
{
struct ath11k *ar;
const struct ath11k_hw_hal_params *hal_params;
struct ath11k_pdev_dp *dp;
struct dp_rxdma_ring *rx_ring;
struct ath11k_mon_data *pmon;
struct hal_srng *srng;
void *rx_mon_status_desc;
struct sk_buff *skb;
struct ath11k_skb_rxcb *rxcb;
struct hal_tlv_hdr *tlv;
u32 cookie;
int buf_id, srng_id;
dma_addr_t paddr;
u8 rbm;
int num_buffs_reaped = 0;
ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
dp = &ar->dp;
pmon = &dp->mon_data;
srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);
rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
spin_lock_bh(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
while (*budget) {
*budget -= 1;
rx_mon_status_desc =
ath11k_hal_srng_src_peek(ab, srng);
if (!rx_mon_status_desc) {
pmon->buf_state = DP_MON_STATUS_REPLINISH;
break;
}
ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
&cookie, &rbm);
if (paddr) {
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
spin_lock_bh(&rx_ring->idr_lock);
skb = idr_find(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
if (!skb) {
ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
buf_id);
pmon->buf_state = DP_MON_STATUS_REPLINISH;
goto move_next;
}
rxcb = ATH11K_SKB_RXCB(skb);
dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
tlv = (struct hal_tlv_hdr *)skb->data;
if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
HAL_RX_STATUS_BUFFER_DONE) {
ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n",
FIELD_GET(HAL_TLV_HDR_TAG,
tlv->tl), buf_id);
/* If the done status is missing, hold on to this
 * status ring entry until DMA for the buffer is
 * done: keep the HP of the mon_status_ring
 * unchanged, break out here, and check the same
 * buffer again next time.
 */
pmon->buf_state = DP_MON_STATUS_NO_DMA;
break;
}
spin_lock_bh(&rx_ring->idr_lock);
idr_remove(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
if (ab->hw_params.full_monitor_mode) {
ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);
if (paddr == pmon->mon_status_paddr)
pmon->buf_state = DP_MON_STATUS_MATCH;
}
dma_unmap_single(ab->dev, rxcb->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
__skb_queue_tail(skb_list, skb);
} else {
pmon->buf_state = DP_MON_STATUS_REPLINISH;
}
move_next:
skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
&buf_id);
if (!skb) {
hal_params = ab->hw_params.hal_params;
ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
hal_params->rx_buf_rbm);
num_buffs_reaped++;
break;
}
rxcb = ATH11K_SKB_RXCB(skb);
cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
cookie,
ab->hw_params.hal_params->rx_buf_rbm);
ath11k_hal_srng_src_get_next_entry(ab, srng);
num_buffs_reaped++;
}
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return num_buffs_reaped;
}
static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
{
struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
spin_lock_bh(&rx_tid->ab->base_lock);
if (rx_tid->last_frag_no &&
rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
spin_unlock_bh(&rx_tid->ab->base_lock);
return;
}
ath11k_dp_rx_frags_cleanup(rx_tid, true);
spin_unlock_bh(&rx_tid->ab->base_lock);
}
int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
{
struct ath11k_base *ab = ar->ab;
struct crypto_shash *tfm;
struct ath11k_peer *peer;
struct dp_rx_tid *rx_tid;
int i;
tfm = crypto_alloc_shash("michael_mic", 0, 0);
if (IS_ERR(tfm)) {
ath11k_warn(ab, "failed to allocate michael_mic shash: %ld\n",
PTR_ERR(tfm));
return PTR_ERR(tfm);
}
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, vdev_id, peer_mac);
if (!peer) {
ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
spin_unlock_bh(&ab->base_lock);
crypto_free_shash(tfm);
return -ENOENT;
}
for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
rx_tid = &peer->rx_tid[i];
rx_tid->ab = ab;
timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
skb_queue_head_init(&rx_tid->rx_frags);
}
peer->tfm_mmic = tfm;
peer->dp_setup_done = true;
spin_unlock_bh(&ab->base_lock);
return 0;
}
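/* Compute the TKIP Michael MIC over the MIC header (DA, SA, priority and
 * three zero pad bytes) followed by the payload, using the kernel's
 * "michael_mic" shash with the 8-byte RX MIC key.
 */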
static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
struct ieee80211_hdr *hdr, u8 *data,
size_t data_len, u8 *mic)
{
SHASH_DESC_ON_STACK(desc, tfm);
u8 mic_hdr[16] = {0};
u8 tid = 0;
int ret;
if (!tfm)
return -EINVAL;
desc->tfm = tfm;
ret = crypto_shash_setkey(tfm, key, 8);
if (ret)
goto out;
ret = crypto_shash_init(desc);
if (ret)
goto out;
/* TKIP MIC header */
memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
if (ieee80211_is_data_qos(hdr->frame_control))
tid = ieee80211_get_tid(hdr);
mic_hdr[12] = tid;
ret = crypto_shash_update(desc, mic_hdr, 16);
if (ret)
goto out;
ret = crypto_shash_update(desc, data, data_len);
if (ret)
goto out;
ret = crypto_shash_final(desc, mic);
out:
shash_desc_zero(desc);
return ret;
}
static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
struct sk_buff *msdu)
{
struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
struct ieee80211_key_conf *key_conf;
struct ieee80211_hdr *hdr;
u8 mic[IEEE80211_CCMP_MIC_LEN];
int head_len, tail_len, ret;
size_t data_len;
u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
u8 *key, *data;
u8 key_idx;
if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) !=
HAL_ENCRYPT_TYPE_TKIP_MIC)
return 0;
hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
hdr_len = ieee80211_hdrlen(hdr->frame_control);
head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
if (!is_multicast_ether_addr(hdr->addr1))
key_idx = peer->ucast_keyidx;
else
key_idx = peer->mcast_keyidx;
key_conf = peer->keys[key_idx];
data = msdu->data + head_len;
data_len = msdu->len - head_len - tail_len;
key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
goto mic_fail;
return 0;
mic_fail:
(ATH11K_SKB_RXCB(msdu))->is_first_msdu = true;
(ATH11K_SKB_RXCB(msdu))->is_last_msdu = true;
rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
skb_pull(msdu, hal_rx_desc_sz);
ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
ieee80211_rx(ar->hw, msdu);
return -EINVAL;
}
static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
enum hal_encrypt_type enctype, u32 flags)
{
struct ieee80211_hdr *hdr;
size_t hdr_len;
size_t crypto_len;
u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
if (!flags)
return;
hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
if (flags & RX_FLAG_MIC_STRIPPED)
skb_trim(msdu, msdu->len -
ath11k_dp_rx_crypto_mic_len(ar, enctype));
if (flags & RX_FLAG_ICV_STRIPPED)
skb_trim(msdu, msdu->len -
ath11k_dp_rx_crypto_icv_len(ar, enctype));
if (flags & RX_FLAG_IV_STRIPPED) {
hdr_len = ieee80211_hdrlen(hdr->frame_control);
crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len,
(void *)msdu->data + hal_rx_desc_sz, hdr_len);
skb_pull(msdu, crypto_len);
}
}
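/* Merge the sorted fragments of rx_tid into a single MSDU: strip crypto
 * trailers and IVs where hardware already decrypted, pull the rx
 * descriptor and 802.11 header off all but the first fragment, grow the
 * first fragment's buffer if the merged length requires it, and append
 * the remaining payloads to it.
 */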
static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
struct ath11k_peer *peer,
struct dp_rx_tid *rx_tid,
struct sk_buff **defrag_skb)
{
struct hal_rx_desc *rx_desc;
struct sk_buff *skb, *first_frag, *last_frag;
struct ieee80211_hdr *hdr;
struct rx_attention *rx_attention;
enum hal_encrypt_type enctype;
bool is_decrypted = false;
int msdu_len = 0;
int extra_space;
u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
first_frag = skb_peek(&rx_tid->rx_frags);
last_frag = skb_peek_tail(&rx_tid->rx_frags);
skb_queue_walk(&rx_tid->rx_frags, skb) {
flags = 0;
rx_desc = (struct hal_rx_desc *)skb->data;
hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
if (enctype != HAL_ENCRYPT_TYPE_OPEN) {
rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
}
if (is_decrypted) {
if (skb != first_frag)
flags |= RX_FLAG_IV_STRIPPED;
if (skb != last_frag)
flags |= RX_FLAG_ICV_STRIPPED |
RX_FLAG_MIC_STRIPPED;
}
/* RX fragments are always raw packets */
if (skb != last_frag)
skb_trim(skb, skb->len - FCS_LEN);
ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
if (skb != first_frag)
skb_pull(skb, hal_rx_desc_sz +
ieee80211_hdrlen(hdr->frame_control));
msdu_len += skb->len;
}
extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
if (extra_space > 0 &&
(pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
return -ENOMEM;
__skb_unlink(first_frag, &rx_tid->rx_frags);
while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
skb_put_data(first_frag, skb->data, skb->len);
dev_kfree_skb_any(skb);
}
hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
ATH11K_SKB_RXCB(first_frag)->is_frag = 1;
if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
first_frag = NULL;
*defrag_skb = first_frag;
return 0;
}
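/* Reinject the defragmented MSDU into the REO entrance ring so that it
 * goes through the regular REO/PN checks again and is delivered to the
 * host as an ordinary MPDU.
 */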
static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
struct sk_buff *defrag_skb)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_pdev_dp *dp = &ar->dp;
struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
struct hal_reo_entrance_ring *reo_ent_ring;
struct hal_reo_dest_ring *reo_dest_ring;
struct dp_link_desc_bank *link_desc_banks;
struct hal_rx_msdu_link *msdu_link;
struct hal_rx_msdu_details *msdu0;
struct hal_srng *srng;
dma_addr_t paddr;
u32 desc_bank, msdu_info, mpdu_info;
u32 dst_idx, cookie, hal_rx_desc_sz;
int ret, buf_id;
hal_rx_desc_sz = ab->hw_params.hal_desc_sz;
link_desc_banks = ab->dp.link_desc_banks;
reo_dest_ring = rx_tid->dst_ring_desc;
ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
(paddr - link_desc_banks[desc_bank].paddr));
msdu0 = &msdu_link->msdu_link[0];
dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
memset(msdu0, 0, sizeof(*msdu0));
msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
defrag_skb->len - hal_rx_desc_sz) |
FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
msdu0->rx_msdu_info.info0 = msdu_info;
/* change msdu len in hal rx desc */
ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
paddr = dma_map_single(ab->dev, defrag_skb->data,
defrag_skb->len + skb_tailroom(defrag_skb),
DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, paddr))
return -ENOMEM;
spin_lock_bh(&rx_refill_ring->idr_lock);
buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
spin_unlock_bh(&rx_refill_ring->idr_lock);
if (buf_id < 0) {
ret = -ENOMEM;
goto err_unmap_dma;
}
ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie,
ab->hw_params.hal_params->rx_buf_rbm);
/* Fill mpdu details into reo entrance ring */
srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];
spin_lock_bh(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
reo_ent_ring = (struct hal_reo_entrance_ring *)
ath11k_hal_srng_src_get_next_entry(ab, srng);
if (!reo_ent_ring) {
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
ret = -ENOSPC;
goto err_free_idr;
}
memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);
mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);
reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
reo_dest_ring->info0)) |
FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return 0;
err_free_idr:
spin_lock_bh(&rx_refill_ring->idr_lock);
idr_remove(&rx_refill_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_refill_ring->idr_lock);
err_unmap_dma:
dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
DMA_TO_DEVICE);
return ret;
}
static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar,
struct sk_buff *a, struct sk_buff *b)
{
int frag1, frag2;
frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a);
frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b);
return frag1 - frag2;
}
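/* Insert cur_frag into frag_list keeping the list sorted by fragment
 * number (a simple insertion sort, as fragment counts are small).
 */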
static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar,
struct sk_buff_head *frag_list,
struct sk_buff *cur_frag)
{
struct sk_buff *skb;
int cmp;
skb_queue_walk(frag_list, skb) {
cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag);
if (cmp < 0)
continue;
__skb_queue_before(frag_list, skb, cur_frag);
return;
}
__skb_queue_tail(frag_list, cur_frag);
}
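/* Extract the 48-bit CCMP/GCMP packet number from the IV that follows
 * the 802.11 header; bytes 2 and 3 (the reserved byte and the key ID
 * octet) are skipped since they carry no PN payload.
 */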
static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
u64 pn = 0;
u8 *ehdr;
u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
pn = ehdr[0];
pn |= (u64)ehdr[1] << 8;
pn |= (u64)ehdr[4] << 16;
pn |= (u64)ehdr[5] << 24;
pn |= (u64)ehdr[6] << 32;
pn |= (u64)ehdr[7] << 40;
return pn;
}
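/* For CCMP/GCMP ciphers every fragment of an MPDU must carry a PN that
 * is exactly one greater than that of the previous fragment; reject the
 * sequence otherwise. Other ciphers are not PN-checked here.
 */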
static bool
ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
{
enum hal_encrypt_type encrypt_type;
struct sk_buff *first_frag, *skb;
struct hal_rx_desc *desc;
u64 last_pn;
u64 cur_pn;
first_frag = skb_peek(&rx_tid->rx_frags);
desc = (struct hal_rx_desc *)first_frag->data;
encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc);
if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
return true;
last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag);
skb_queue_walk(&rx_tid->rx_frags, skb) {
if (skb == first_frag)
continue;
cur_pn = ath11k_dp_rx_h_get_pn(ar, skb);
if (cur_pn != last_pn + 1)
return false;
last_pn = cur_pn;
}
return true;
}
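/* Per-TID defragmentation entry point: validate the fragment, queue it
 * in fragment-number order, arm the reassembly timer, and once all
 * fragments up to last_frag_no are present, verify the PN sequence,
 * merge the fragments (which also checks the TKIP MIC) and reinject the
 * result via REO.
 */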
static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
struct sk_buff *msdu,
u32 *ring_desc)
{
struct ath11k_base *ab = ar->ab;
struct hal_rx_desc *rx_desc;
struct ath11k_peer *peer;
struct dp_rx_tid *rx_tid;
struct sk_buff *defrag_skb = NULL;
u32 peer_id;
u16 seqno, frag_no;
u8 tid;
int ret = 0;
bool more_frags;
bool is_mcbc;
rx_desc = (struct hal_rx_desc *)msdu->data;
peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc);
seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
/* Multicast/Broadcast fragments are not expected */
if (is_mcbc)
return -EINVAL;
if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
!ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
tid > IEEE80211_NUM_TIDS)
return -EINVAL;
/* An unfragmented packet was received in the reo
 * exception ring; this shouldn't happen, as such
 * packets typically come from the reo2sw srngs.
 */
if (WARN_ON_ONCE(!frag_no && !more_frags))
return -EINVAL;
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_id(ab, peer_id);
if (!peer) {
ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
peer_id);
ret = -ENOENT;
goto out_unlock;
}
if (!peer->dp_setup_done) {
ath11k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
peer->addr, peer_id);
ret = -ENOENT;
goto out_unlock;
}
rx_tid = &peer->rx_tid[tid];
if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
skb_queue_empty(&rx_tid->rx_frags)) {
/* Flush stored fragments and start a new sequence */
ath11k_dp_rx_frags_cleanup(rx_tid, true);
rx_tid->cur_sn = seqno;
}
if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
/* Fragment already present */
ret = -EINVAL;
goto out_unlock;
}
if (!rx_tid->rx_frag_bitmap || (frag_no > __fls(rx_tid->rx_frag_bitmap)))
__skb_queue_tail(&rx_tid->rx_frags, msdu);
else
ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu);
rx_tid->rx_frag_bitmap |= BIT(frag_no);
if (!more_frags)
rx_tid->last_frag_no = frag_no;
if (frag_no == 0) {
rx_tid->dst_ring_desc = kmemdup(ring_desc,
sizeof(*rx_tid->dst_ring_desc),
GFP_ATOMIC);
if (!rx_tid->dst_ring_desc) {
ret = -ENOMEM;
goto out_unlock;
}
} else {
ath11k_dp_rx_link_desc_return(ab, ring_desc,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
if (!rx_tid->last_frag_no ||
rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
mod_timer(&rx_tid->frag_timer, jiffies +
ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);
goto out_unlock;
}
spin_unlock_bh(&ab->base_lock);
del_timer_sync(&rx_tid->frag_timer);
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_id(ab, peer_id);
if (!peer)
goto err_frags_cleanup;
if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
goto err_frags_cleanup;
if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
goto err_frags_cleanup;
if (!defrag_skb)
goto err_frags_cleanup;
if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
goto err_frags_cleanup;
ath11k_dp_rx_frags_cleanup(rx_tid, false);
goto out_unlock;
err_frags_cleanup:
dev_kfree_skb_any(defrag_skb);
ath11k_dp_rx_frags_cleanup(rx_tid, true);
out_unlock:
spin_unlock_bh(&ab->base_lock);
return ret;
}
static int
ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
struct sk_buff *msdu;
struct ath11k_skb_rxcb *rxcb;
struct hal_rx_desc *rx_desc;
u8 *hdr_status;
u16 msdu_len;
u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
spin_lock_bh(&rx_ring->idr_lock);
msdu = idr_find(&rx_ring->bufs_idr, buf_id);
if (!msdu) {
ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
return -EINVAL;
}
idr_remove(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
rxcb = ATH11K_SKB_RXCB(msdu);
dma_unmap_single(ar->ab->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
if (drop) {
dev_kfree_skb_any(msdu);
return 0;
}
rcu_read_lock();
if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
dev_kfree_skb_any(msdu);
goto exit;
}
if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
dev_kfree_skb_any(msdu);
goto exit;
}
rx_desc = (struct hal_rx_desc *)msdu->data;
msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc);
if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
sizeof(struct ieee80211_hdr));
ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
sizeof(struct hal_rx_desc));
dev_kfree_skb_any(msdu);
goto exit;
}
skb_put(msdu, hal_rx_desc_sz + msdu_len);
if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
dev_kfree_skb_any(msdu);
ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
exit:
rcu_read_unlock();
return 0;
}
int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
int budget)
{
u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
struct dp_link_desc_bank *link_desc_banks;
enum hal_rx_buf_return_buf_manager rbm;
int tot_n_bufs_reaped, quota, ret, i;
int n_bufs_reaped[MAX_RADIOS] = {0};
struct dp_rxdma_ring *rx_ring;
struct dp_srng *reo_except;
u32 desc_bank, num_msdus;
struct hal_srng *srng;
struct ath11k_dp *dp;
void *link_desc_va;
int buf_id, mac_id;
struct ath11k *ar;
dma_addr_t paddr;
u32 *desc;
bool is_frag;
u8 drop = 0;
tot_n_bufs_reaped = 0;
quota = budget;
dp = &ab->dp;
reo_except = &dp->reo_except_ring;
link_desc_banks = dp->link_desc_banks;
srng = &ab->hal.srng_list[reo_except->ring_id];
spin_lock_bh(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
while (budget &&
(desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;
ab->soc_stats.err_ring_pkts++;
ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
&desc_bank);
if (ret) {
ath11k_warn(ab, "failed to parse error reo desc %d\n",
ret);
continue;
}
link_desc_va = link_desc_banks[desc_bank].vaddr +
(paddr - link_desc_banks[desc_bank].paddr);
ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
&rbm);
if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
rbm != HAL_RX_BUF_RBM_SW3_BM) {
ab->soc_stats.invalid_rbm++;
ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
ath11k_dp_rx_link_desc_return(ab, desc,
HAL_WBM_REL_BM_ACT_REL_MSDU);
continue;
}
is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
/* Only rx fragments with one msdu per link desc are processed
 * below; msdus flagged with error reasons are dropped.
 */
if (!is_frag || num_msdus > 1) {
drop = 1;
/* Return the link desc back to wbm idle list */
ath11k_dp_rx_link_desc_return(ab, desc,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
for (i = 0; i < num_msdus; i++) {
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
msdu_cookies[i]);
mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
msdu_cookies[i]);
ar = ab->pdevs[mac_id].ar;
if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
n_bufs_reaped[mac_id]++;
tot_n_bufs_reaped++;
}
}
if (tot_n_bufs_reaped >= quota) {
tot_n_bufs_reaped = quota;
goto exit;
}
budget = quota - tot_n_bufs_reaped;
}
exit:
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
for (i = 0; i < ab->num_radios; i++) {
if (!n_bufs_reaped[i])
continue;
ar = ab->pdevs[i].ar;
rx_ring = &ar->dp.rx_refill_buf_ring;
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
ab->hw_params.hal_params->rx_buf_rbm);
}
return tot_n_bufs_reaped;
}
static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
int msdu_len,
struct sk_buff_head *msdu_list)
{
struct sk_buff *skb, *tmp;
struct ath11k_skb_rxcb *rxcb;
int n_buffs;
n_buffs = DIV_ROUND_UP(msdu_len,
(DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz));
skb_queue_walk_safe(msdu_list, skb, tmp) {
rxcb = ATH11K_SKB_RXCB(skb);
if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
if (!n_buffs)
break;
__skb_unlink(skb, msdu_list);
dev_kfree_skb_any(skb);
n_buffs--;
}
}
}
static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
struct ieee80211_rx_status *status,
struct sk_buff_head *msdu_list)
{
u16 msdu_len;
struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
struct rx_attention *rx_attention;
u8 l3pad_bytes;
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
/* First buffer will be freed by the caller, so deduct its length */
msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
return -EINVAL;
}
rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc);
if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
ath11k_warn(ar->ab,
"msdu_done bit not set in null_q_des processing\n");
__skb_queue_purge(msdu_list);
return -EIO;
}
/* Handle NULL queue descriptor violations arising out of a missing
 * REO queue for a given peer or a given TID. This typically
 * happens if a packet is received on a QoS-enabled TID before the
 * ADDBA negotiation for that TID (which sets up the TID queue). It
 * may also happen for MC/BC frames if they are not routed to the
 * non-QoS TID queue in the absence of any other default TID queue.
 * This error can show up in both a REO destination and a WBM
 * release ring.
 */
rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
if (rxcb->is_frag) {
skb_pull(msdu, hal_rx_desc_sz);
} else {
l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
return -EINVAL;
skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
}
ath11k_dp_rx_h_ppdu(ar, desc, status);
ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);
rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc);
/* Note that the caller still has access to the msdu and completes
 * rx with mac80211, so there is no need to clean up the amsdu_list.
 */
return 0;
}
static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
struct ieee80211_rx_status *status,
struct sk_buff_head *msdu_list)
{
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
bool drop = false;
ar->ab->soc_stats.reo_error[rxcb->err_code]++;
switch (rxcb->err_code) {
case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
drop = true;
break;
case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
/* TODO: Do not drop PN failed packets in the driver;
* instead, it is good to drop such packets in mac80211
* after incrementing the replay counters.
*/
fallthrough;
default:
/* TODO: Review other errors and process them to mac80211
* as appropriate.
*/
drop = true;
break;
}
return drop;
}
static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
struct ieee80211_rx_status *status)
{
u16 msdu_len;
struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
u8 l3pad_bytes;
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
ath11k_dp_rx_h_ppdu(ar, desc, status);
status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
RX_FLAG_DECRYPTED);
ath11k_dp_rx_h_undecap(ar, msdu, desc,
HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
}
static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
struct ieee80211_rx_status *status)
{
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
bool drop = false;
ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
switch (rxcb->err_code) {
case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
break;
default:
/* TODO: Review the other rxdma error codes to check if anything is
 * worth reporting to mac80211
 */
drop = true;
break;
}
return drop;
}
static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
struct napi_struct *napi,
struct sk_buff *msdu,
struct sk_buff_head *msdu_list)
{
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
struct ieee80211_rx_status rxs = {0};
bool drop = true;
switch (rxcb->err_rel_src) {
case HAL_WBM_REL_SRC_MODULE_REO:
drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
break;
case HAL_WBM_REL_SRC_MODULE_RXDMA:
drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
break;
default:
/* msdu will get freed */
break;
}
if (drop) {
dev_kfree_skb_any(msdu);
return;
}
ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
}
int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
struct napi_struct *napi, int budget)
{
struct ath11k *ar;
struct ath11k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring;
struct hal_rx_wbm_rel_info err_info;
struct hal_srng *srng;
struct sk_buff *msdu;
struct sk_buff_head msdu_list[MAX_RADIOS];
struct ath11k_skb_rxcb *rxcb;
u32 *rx_desc;
int buf_id, mac_id;
int num_buffs_reaped[MAX_RADIOS] = {0};
int total_num_buffs_reaped = 0;
int ret, i;
for (i = 0; i < ab->num_radios; i++)
__skb_queue_head_init(&msdu_list[i]);
srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
spin_lock_bh(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
while (budget) {
rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
if (!rx_desc)
break;
ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
if (ret) {
ath11k_warn(ab,
"failed to parse rx error in wbm_rel ring desc %d\n",
ret);
continue;
}
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
ar = ab->pdevs[mac_id].ar;
rx_ring = &ar->dp.rx_refill_buf_ring;
spin_lock_bh(&rx_ring->idr_lock);
msdu = idr_find(&rx_ring->bufs_idr, buf_id);
if (!msdu) {
ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
buf_id, mac_id);
spin_unlock_bh(&rx_ring->idr_lock);
continue;
}
idr_remove(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
rxcb = ATH11K_SKB_RXCB(msdu);
dma_unmap_single(ab->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
num_buffs_reaped[mac_id]++;
total_num_buffs_reaped++;
budget--;
if (err_info.push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
dev_kfree_skb_any(msdu);
continue;
}
rxcb->err_rel_src = err_info.err_rel_src;
rxcb->err_code = err_info.err_code;
rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
__skb_queue_tail(&msdu_list[mac_id], msdu);
}
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
if (!total_num_buffs_reaped)
goto done;
for (i = 0; i < ab->num_radios; i++) {
if (!num_buffs_reaped[i])
continue;
ar = ab->pdevs[i].ar;
rx_ring = &ar->dp.rx_refill_buf_ring;
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
ab->hw_params.hal_params->rx_buf_rbm);
}
rcu_read_lock();
for (i = 0; i < ab->num_radios; i++) {
if (!rcu_dereference(ab->pdevs_active[i])) {
__skb_queue_purge(&msdu_list[i]);
continue;
}
ar = ab->pdevs[i].ar;
if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
__skb_queue_purge(&msdu_list[i]);
continue;
}
while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
}
rcu_read_unlock();
done:
return total_num_buffs_reaped;
}
int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
{
struct ath11k *ar;
struct dp_srng *err_ring;
struct dp_rxdma_ring *rx_ring;
struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
struct hal_srng *srng;
u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
enum hal_rx_buf_return_buf_manager rbm;
enum hal_reo_entr_rxdma_ecode rxdma_err_code;
struct ath11k_skb_rxcb *rxcb;
struct sk_buff *skb;
struct hal_reo_entrance_ring *entr_ring;
void *desc;
int num_buf_freed = 0;
int quota = budget;
dma_addr_t paddr;
u32 desc_bank;
void *link_desc_va;
int num_msdus;
int i;
int buf_id;
ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params,
mac_id)];
rx_ring = &ar->dp.rx_refill_buf_ring;
srng = &ab->hal.srng_list[err_ring->ring_id];
spin_lock_bh(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
while (quota-- &&
(desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);
entr_ring = (struct hal_reo_entrance_ring *)desc;
rxdma_err_code =
FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
entr_ring->info1);
ab->soc_stats.rxdma_error[rxdma_err_code]++;
link_desc_va = link_desc_banks[desc_bank].vaddr +
(paddr - link_desc_banks[desc_bank].paddr);
ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
msdu_cookies, &rbm);
for (i = 0; i < num_msdus; i++) {
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
msdu_cookies[i]);
spin_lock_bh(&rx_ring->idr_lock);
skb = idr_find(&rx_ring->bufs_idr, buf_id);
if (!skb) {
ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
continue;
}
idr_remove(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
rxcb = ATH11K_SKB_RXCB(skb);
dma_unmap_single(ab->dev, rxcb->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
num_buf_freed++;
}
ath11k_dp_rx_link_desc_return(ab, desc,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
if (num_buf_freed)
ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
ab->hw_params.hal_params->rx_buf_rbm);
return budget - quota;
}
void ath11k_dp_process_reo_status(struct ath11k_base *ab)
{
struct ath11k_dp *dp = &ab->dp;
struct hal_srng *srng;
struct dp_reo_cmd *cmd, *tmp;
bool found = false;
u32 *reo_desc;
u16 tag;
struct hal_reo_status reo_status;
srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
memset(&reo_status, 0, sizeof(reo_status));
spin_lock_bh(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
switch (tag) {
case HAL_REO_GET_QUEUE_STATS_STATUS:
ath11k_hal_reo_status_queue_stats(ab, reo_desc,
&reo_status);
break;
case HAL_REO_FLUSH_QUEUE_STATUS:
ath11k_hal_reo_flush_queue_status(ab, reo_desc,
&reo_status);
break;
case HAL_REO_FLUSH_CACHE_STATUS:
ath11k_hal_reo_flush_cache_status(ab, reo_desc,
&reo_status);
break;
case HAL_REO_UNBLOCK_CACHE_STATUS:
ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
&reo_status);
break;
case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
&reo_status);
break;
case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
&reo_status);
break;
case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
&reo_status);
break;
default:
ath11k_warn(ab, "Unknown reo status type %d\n", tag);
continue;
}
spin_lock_bh(&dp->reo_cmd_lock);
list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
found = true;
list_del(&cmd->list);
break;
}
}
spin_unlock_bh(&dp->reo_cmd_lock);
if (found) {
cmd->handler(dp, (void *)&cmd->data,
reo_status.uniform_hdr.cmd_status);
kfree(cmd);
}
found = false;
}
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
}
void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
{
struct ath11k *ar = ab->pdevs[mac_id].ar;
ath11k_dp_rx_pdev_srng_free(ar);
ath11k_dp_rxdma_pdev_buf_free(ar);
}
int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
{
struct ath11k *ar = ab->pdevs[mac_id].ar;
struct ath11k_pdev_dp *dp = &ar->dp;
u32 ring_id;
int i;
int ret;
ret = ath11k_dp_rx_pdev_srng_alloc(ar);
if (ret) {
ath11k_warn(ab, "failed to setup rx srngs\n");
return ret;
}
ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
if (ret) {
ath11k_warn(ab, "failed to setup rxdma ring\n");
return ret;
}
ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
if (ret) {
ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
ret);
return ret;
}
if (ab->hw_params.rx_mac_buf_ring) {
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
ring_id = dp->rx_mac_buf_ring[i].ring_id;
ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
mac_id + i, HAL_RXDMA_BUF);
if (ret) {
ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
i, ret);
return ret;
}
}
}
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
ring_id = dp->rxdma_err_dst_ring[i].ring_id;
ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
mac_id + i, HAL_RXDMA_DST);
if (ret) {
ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
i, ret);
return ret;
}
}
if (!ab->hw_params.rxdma1_enable)
goto config_refill_ring;
ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
mac_id, HAL_RXDMA_MONITOR_BUF);
if (ret) {
ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
ret);
return ret;
}
ret = ath11k_dp_tx_htt_srng_setup(ab,
dp->rxdma_mon_dst_ring.ring_id,
mac_id, HAL_RXDMA_MONITOR_DST);
if (ret) {
ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
ret);
return ret;
}
ret = ath11k_dp_tx_htt_srng_setup(ab,
dp->rxdma_mon_desc_ring.ring_id,
mac_id, HAL_RXDMA_MONITOR_DESC);
if (ret) {
ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
ret);
return ret;
}
config_refill_ring:
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
HAL_RXDMA_MONITOR_STATUS);
if (ret) {
ath11k_warn(ab,
"failed to configure mon_status_refill_ring%d %d\n",
i, ret);
return ret;
}
}
return 0;
}
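/* Split the remaining MPDU length into per-buffer fragment lengths,
 * capped at the RX buffer size minus the HAL rx descriptor.
 */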
static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
{
if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
*frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
*total_len -= *frag_len;
} else {
*frag_len = *total_len;
*total_len = 0;
}
}
static
int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
void *p_last_buf_addr_info,
u8 mac_id)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct dp_srng *dp_srng;
void *hal_srng;
void *src_srng_desc;
int ret = 0;
if (ar->ab->hw_params.rxdma1_enable) {
dp_srng = &dp->rxdma_mon_desc_ring;
hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
} else {
dp_srng = &ar->ab->dp.wbm_desc_rel_ring;
hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
}
ath11k_hal_srng_access_begin(ar->ab, hal_srng);
src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
if (src_srng_desc) {
struct ath11k_buffer_addr *src_desc =
(struct ath11k_buffer_addr *)src_srng_desc;
*src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
} else {
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"Monitor Link Desc Ring %d Full", mac_id);
ret = -ENOMEM;
}
ath11k_hal_srng_access_end(ar->ab, hal_srng);
return ret;
}
static
void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
dma_addr_t *paddr, u32 *sw_cookie,
u8 *rbm,
void **pp_buf_addr_info)
{
struct hal_rx_msdu_link *msdu_link =
(struct hal_rx_msdu_link *)rx_msdu_link_desc;
struct ath11k_buffer_addr *buf_addr_info;
buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
*pp_buf_addr_info = (void *)buf_addr_info;
}
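/* Force the skb payload to exactly len bytes: trim when longer,
 * otherwise grow the tailroom if needed and skb_put() the difference.
 * The skb is freed on allocation failure.
 */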
static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
{
if (skb->len > len) {
skb_trim(skb, len);
} else {
if (skb_tailroom(skb) < len - skb->len) {
if ((pskb_expand_head(skb, 0,
len - skb->len - skb_tailroom(skb),
GFP_ATOMIC))) {
dev_kfree_skb_any(skb);
return -ENOMEM;
}
}
skb_put(skb, (len - skb->len));
}
return 0;
}
static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
void *msdu_link_desc,
struct hal_rx_msdu_list *msdu_list,
u16 *num_msdus)
{
struct hal_rx_msdu_details *msdu_details = NULL;
struct rx_msdu_desc *msdu_desc_info = NULL;
struct hal_rx_msdu_link *msdu_link = NULL;
int i;
u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
u8 tmp = 0;
msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc;
msdu_details = &msdu_link->msdu_link[0];
for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
msdu_details[i].buf_addr_info.info0) == 0) {
msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
msdu_desc_info->info0 |= last;
break;
}
msdu_desc_info = &msdu_details[i].rx_msdu_info;
if (!i)
msdu_desc_info->info0 |= first;
else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
msdu_desc_info->info0 |= last;
msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
msdu_list->msdu_info[i].msdu_len =
HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
msdu_list->sw_cookie[i] =
FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
msdu_details[i].buf_addr_info.info1);
tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
msdu_details[i].buf_addr_info.info1);
msdu_list->rbm[i] = tmp;
}
*num_msdus = i;
}
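/* Reconcile the PPDU ID seen on the destination ring with the status
 * ring PPDU ID, accounting for wraparound at DP_NOT_PPDU_ID_WRAP_AROUND.
 * A non-zero return updates *ppdu_id to the destination's value; zero
 * means both rings refer to the same PPDU.
 */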
static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
u32 *rx_bufs_used)
{
u32 ret = 0;
if ((*ppdu_id < msdu_ppdu_id) &&
((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
*ppdu_id = msdu_ppdu_id;
ret = msdu_ppdu_id;
} else if ((*ppdu_id > msdu_ppdu_id) &&
((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
/* mon_dst is behind mon_status;
 * skip this dst_ring entry and free it
 */
*rx_bufs_used += 1;
*ppdu_id = msdu_ppdu_id;
ret = msdu_ppdu_id;
}
return ret;
}
static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
bool *is_frag, u32 *total_len,
u32 *frag_len, u32 *msdu_cnt)
{
if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
if (!*is_frag) {
*total_len = info->msdu_len;
*is_frag = true;
}
ath11k_dp_mon_set_frag_len(total_len,
frag_len);
} else {
if (*is_frag) {
ath11k_dp_mon_set_frag_len(total_len,
frag_len);
} else {
*frag_len = info->msdu_len;
}
*is_frag = false;
*msdu_cnt -= 1;
}
}
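/* Walk the MSDU link descriptor chain for one destination ring entry,
 * popping the MSDUs of a monitor MPDU into a head/tail skb chain and
 * returning each link descriptor to its idle list. Returns the number
 * of rx buffers consumed.
 */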
static u32
ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
void *ring_entry, struct sk_buff **head_msdu,
struct sk_buff **tail_msdu, u32 *npackets,
u32 *ppdu_id)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
struct sk_buff *msdu = NULL, *last = NULL;
struct hal_rx_msdu_list msdu_list;
void *p_buf_addr_info, *p_last_buf_addr_info;
struct hal_rx_desc *rx_desc;
void *rx_msdu_link_desc;
dma_addr_t paddr;
u16 num_msdus = 0;
u32 rx_buf_size, rx_pkt_offset, sw_cookie;
u32 rx_bufs_used = 0, i = 0;
u32 msdu_ppdu_id = 0, msdu_cnt = 0;
u32 total_len = 0, frag_len = 0;
bool is_frag, is_first_msdu;
bool drop_mpdu = false;
struct ath11k_skb_rxcb *rxcb;
struct hal_reo_entrance_ring *ent_desc =
(struct hal_reo_entrance_ring *)ring_entry;
int buf_id;
u32 rx_link_buf_info[2];
u8 rbm;
if (!ar->ab->hw_params.rxdma1_enable)
rx_ring = &dp->rx_refill_buf_ring;
ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
&sw_cookie,
&p_last_buf_addr_info, &rbm,
&msdu_cnt);
if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
ent_desc->info1) ==
HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
u8 rxdma_err =
FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
ent_desc->info1);
if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
drop_mpdu = true;
pmon->rx_mon_stats.dest_mpdu_drop++;
}
}
is_frag = false;
is_first_msdu = true;
do {
if (pmon->mon_last_linkdesc_paddr == paddr) {
pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
return rx_bufs_used;
}
if (ar->ab->hw_params.rxdma1_enable)
rx_msdu_link_desc =
(void *)pmon->link_desc_banks[sw_cookie].vaddr +
(paddr - pmon->link_desc_banks[sw_cookie].paddr);
else
rx_msdu_link_desc =
(void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
(paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr);
ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
&num_msdus);
for (i = 0; i < num_msdus; i++) {
u32 l2_hdr_offset;
if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"i %d last_cookie %d is same\n",
i, pmon->mon_last_buf_cookie);
drop_mpdu = true;
pmon->rx_mon_stats.dup_mon_buf_cnt++;
continue;
}
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
msdu_list.sw_cookie[i]);
spin_lock_bh(&rx_ring->idr_lock);
msdu = idr_find(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
if (!msdu) {
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"msdu_pop: invalid buf_id %d\n", buf_id);
break;
}
rxcb = ATH11K_SKB_RXCB(msdu);
if (!rxcb->unmapped) {
dma_unmap_single(ar->ab->dev, rxcb->paddr,
msdu->len +
skb_tailroom(msdu),
DMA_FROM_DEVICE);
rxcb->unmapped = 1;
}
if (drop_mpdu) {
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"i %d drop msdu %p *ppdu_id %x\n",
i, msdu, *ppdu_id);
dev_kfree_skb_any(msdu);
msdu = NULL;
goto next_msdu;
}
rx_desc = (struct hal_rx_desc *)msdu->data;
rx_pkt_offset = sizeof(struct hal_rx_desc);
l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
if (is_first_msdu) {
if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
drop_mpdu = true;
dev_kfree_skb_any(msdu);
msdu = NULL;
pmon->mon_last_linkdesc_paddr = paddr;
goto next_msdu;
}
msdu_ppdu_id =
ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);
if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
ppdu_id,
&rx_bufs_used)) {
if (rx_bufs_used) {
drop_mpdu = true;
dev_kfree_skb_any(msdu);
msdu = NULL;
goto next_msdu;
}
return rx_bufs_used;
}
pmon->mon_last_linkdesc_paddr = paddr;
is_first_msdu = false;
}
ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
&is_frag, &total_len,
&frag_len, &msdu_cnt);
rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
if (!(*head_msdu))
*head_msdu = msdu;
else if (last)
last->next = msdu;
last = msdu;
next_msdu:
pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
rx_bufs_used++;
spin_lock_bh(&rx_ring->idr_lock);
idr_remove(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
}
ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm);
ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
&sw_cookie, &rbm,
&p_buf_addr_info);
if (ar->ab->hw_params.rxdma1_enable) {
if (ath11k_dp_rx_monitor_link_desc_return(ar,
p_last_buf_addr_info,
dp->mac_id))
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"dp_rx_monitor_link_desc_return failed");
} else {
ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
p_last_buf_addr_info = p_buf_addr_info;
} while (paddr && msdu_cnt);
if (last)
last->next = NULL;
*tail_msdu = msdu;
if (msdu_cnt == 0)
*npackets = 1;
return rx_bufs_used;
}
static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu)
{
u32 rx_pkt_offset, l2_hdr_offset;
rx_pkt_offset = ar->ab->hw_params.hal_desc_sz;
l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab,
(struct hal_rx_desc *)msdu->data);
skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
}
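/* Stitch the popped MSDU chain into one monitor frame. For raw decap,
 * strip the per-MSDU headroom and the trailing FCS; for native wifi
 * decap, push room for the QoS control field on QoS frames and copy
 * the 802.11 header from the rx descriptor back over the start.
 */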
static struct sk_buff *
ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
u32 mac_id, struct sk_buff *head_msdu,
struct sk_buff *last_msdu,
struct ieee80211_rx_status *rxs, bool *fcs_err)
{
struct ath11k_base *ab = ar->ab;
struct sk_buff *msdu, *prev_buf;
struct hal_rx_desc *rx_desc;
char *hdr_desc;
u8 *dest, decap_format;
struct ieee80211_hdr_3addr *wh;
struct rx_attention *rx_attention;
u32 err_bitmap;
if (!head_msdu)
goto err_merge_fail;
rx_desc = (struct hal_rx_desc *)head_msdu->data;
rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc);
err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
if (err_bitmap & DP_RX_MPDU_ERR_FCS)
*fcs_err = true;
if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention))
return NULL;
decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc);
ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
if (decap_format == DP_RX_DECAP_TYPE_RAW) {
ath11k_dp_rx_msdus_set_payload(ar, head_msdu);
prev_buf = head_msdu;
msdu = head_msdu->next;
while (msdu) {
ath11k_dp_rx_msdus_set_payload(ar, msdu);
prev_buf = msdu;
msdu = msdu->next;
}
prev_buf->next = NULL;
skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
u8 qos_pkt = 0;
rx_desc = (struct hal_rx_desc *)head_msdu->data;
hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
/* Base size */
wh = (struct ieee80211_hdr_3addr *)hdr_desc;
if (ieee80211_is_data_qos(wh->frame_control))
qos_pkt = 1;
msdu = head_msdu;
while (msdu) {
ath11k_dp_rx_msdus_set_payload(ar, msdu);
if (qos_pkt) {
dest = skb_push(msdu, sizeof(__le16));
if (!dest)
goto err_merge_fail;
memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
}
prev_buf = msdu;
msdu = msdu->next;
}
dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
if (!dest)
goto err_merge_fail;
ath11k_dbg(ab, ATH11K_DBG_DATA,
"mpdu_buf %p mpdu_buf->len %u",
prev_buf, prev_buf->len);
} else {
ath11k_dbg(ab, ATH11K_DBG_DATA,
"decap format %d is not supported!\n",
decap_format);
goto err_merge_fail;
}
return head_msdu;
err_merge_fail:
return NULL;
}
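/* Serialize the HE fields gathered during status TLV parsing into the
 * packed little-endian layout that the radiotap HE header expects.
 */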
static void
ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
u8 *rtap_buf)
{
u32 rtap_len = 0;
put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
}
static void
ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
u8 *rtap_buf)
{
u32 rtap_len = 0;
put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
rtap_len += 2;
rtap_buf[rtap_len] = rx_status->he_RU[0];
rtap_len += 1;
rtap_buf[rtap_len] = rx_status->he_RU[1];
rtap_len += 1;
rtap_buf[rtap_len] = rx_status->he_RU[2];
rtap_len += 1;
rtap_buf[rtap_len] = rx_status->he_RU[3];
}
static void ath11k_update_radiotap(struct ath11k *ar,
struct hal_rx_mon_ppdu_info *ppduinfo,
struct sk_buff *mon_skb,
struct ieee80211_rx_status *rxs)
{
struct ieee80211_supported_band *sband;
u8 *ptr = NULL;
rxs->flag |= RX_FLAG_MACTIME_START;
rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
if (ppduinfo->nss)
rxs->nss = ppduinfo->nss;
if (ppduinfo->he_mu_flags) {
rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
rxs->encoding = RX_ENC_HE;
ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr);
} else if (ppduinfo->he_flags) {
rxs->flag |= RX_FLAG_RADIOTAP_HE;
rxs->encoding = RX_ENC_HE;
ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr);
rxs->rate_idx = ppduinfo->rate;
} else if (ppduinfo->vht_flags) {
rxs->encoding = RX_ENC_VHT;
rxs->rate_idx = ppduinfo->rate;
} else if (ppduinfo->ht_flags) {
rxs->encoding = RX_ENC_HT;
rxs->rate_idx = ppduinfo->rate;
} else {
rxs->encoding = RX_ENC_LEGACY;
sband = &ar->mac.sbands[rxs->band];
rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
ppduinfo->cck_flag);
}
rxs->mactime = ppduinfo->tsft;
}
static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
struct sk_buff *head_msdu,
struct hal_rx_mon_ppdu_info *ppduinfo,
struct sk_buff *tail_msdu,
struct napi_struct *napi)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct sk_buff *mon_skb, *skb_next, *header;
struct ieee80211_rx_status *rxs = &dp->rx_status;
bool fcs_err = false;
mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
tail_msdu, rxs, &fcs_err);
if (!mon_skb)
goto mon_deliver_fail;
header = mon_skb;
rxs->flag = 0;
if (fcs_err)
rxs->flag = RX_FLAG_FAILED_FCS_CRC;
do {
skb_next = mon_skb->next;
if (!skb_next)
rxs->flag &= ~RX_FLAG_AMSDU_MORE;
else
rxs->flag |= RX_FLAG_AMSDU_MORE;
if (mon_skb == header) {
header = NULL;
rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
} else {
rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
}
rxs->flag |= RX_FLAG_ONLY_MONITOR;
ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);
ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
mon_skb = skb_next;
} while (mon_skb);
rxs->flag = 0;
return 0;
mon_deliver_fail:
mon_skb = head_msdu;
while (mon_skb) {
skb_next = mon_skb->next;
dev_kfree_skb_any(mon_skb);
mon_skb = skb_next;
}
return -EINVAL;
}
/* The destination ring processing is considered stuck if the
 * destination ring does not move while the status ring moves 16
 * PPDUs. As a workaround, the destination ring processing then skips
 * the stuck destination ring PPDU.
 */
#define MON_DEST_RING_STUCK_MAX_CNT 16
static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
u32 quota, struct napi_struct *napi)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
const struct ath11k_hw_hal_params *hal_params;
void *ring_entry;
void *mon_dst_srng;
u32 ppdu_id;
u32 rx_bufs_used;
u32 ring_id;
struct ath11k_pdev_mon_stats *rx_mon_stats;
u32 npackets = 0;
u32 mpdu_rx_bufs_used;
if (ar->ab->hw_params.rxdma1_enable)
ring_id = dp->rxdma_mon_dst_ring.ring_id;
else
ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
mon_dst_srng = &ar->ab->hal.srng_list[ring_id];
if (!mon_dst_srng) {
ath11k_warn(ar->ab,
"HAL Monitor Destination Ring Init Failed -- %p",
mon_dst_srng);
return;
}
spin_lock_bh(&pmon->mon_lock);
ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
ppdu_id = pmon->mon_ppdu_info.ppdu_id;
rx_bufs_used = 0;
rx_mon_stats = &pmon->rx_mon_stats;
while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
struct sk_buff *head_msdu, *tail_msdu;
head_msdu = NULL;
tail_msdu = NULL;
mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
&head_msdu,
&tail_msdu,
&npackets, &ppdu_id);
rx_bufs_used += mpdu_rx_bufs_used;
if (mpdu_rx_bufs_used) {
dp->mon_dest_ring_stuck_cnt = 0;
} else {
dp->mon_dest_ring_stuck_cnt++;
rx_mon_stats->dest_mon_not_reaped++;
}
if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
rx_mon_stats->dest_mon_stuck++;
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
pmon->mon_ppdu_info.ppdu_id, ppdu_id,
dp->mon_dest_ring_stuck_cnt,
rx_mon_stats->dest_mon_not_reaped,
rx_mon_stats->dest_mon_stuck);
pmon->mon_ppdu_info.ppdu_id = ppdu_id;
continue;
}
if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
ppdu_id, pmon->mon_ppdu_info.ppdu_id,
rx_mon_stats->dest_mon_not_reaped,
rx_mon_stats->dest_mon_stuck);
break;
}
if (head_msdu && tail_msdu) {
ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
&pmon->mon_ppdu_info,
tail_msdu, napi);
rx_mon_stats->dest_mpdu_done++;
}
ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
mon_dst_srng);
}
ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
spin_unlock_bh(&pmon->mon_lock);
if (rx_bufs_used) {
rx_mon_stats->dest_ppdu_done++;
hal_params = ar->ab->hw_params.hal_params;
if (ar->ab->hw_params.rxdma1_enable)
ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
&dp->rxdma_mon_buf_ring,
rx_bufs_used,
hal_params->rx_buf_rbm);
else
ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
&dp->rx_refill_buf_ring,
rx_bufs_used,
hal_params->rx_buf_rbm);
}
}
int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, int budget)
{
struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
enum hal_rx_mon_status hal_status;
struct sk_buff *skb;
struct sk_buff_head skb_list;
struct ath11k_peer *peer;
struct ath11k_sta *arsta;
int num_buffs_reaped = 0;
u32 rx_buf_sz;
u16 log_type;
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
__skb_queue_head_init(&skb_list);
num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
&skb_list);
if (!num_buffs_reaped)
goto exit;
memset(ppdu_info, 0, sizeof(*ppdu_info));
ppdu_info->peer_id = HAL_INVALID_PEERID;
while ((skb = __skb_dequeue(&skb_list))) {
if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
} else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
rx_buf_sz = DP_RX_BUFFER_SIZE;
} else {
log_type = ATH11K_PKTLOG_TYPE_INVALID;
rx_buf_sz = 0;
}
if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
memset(ppdu_info, 0, sizeof(*ppdu_info));
ppdu_info->peer_id = HAL_INVALID_PEERID;
hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
hal_status == HAL_TLV_STATUS_PPDU_DONE) {
rx_mon_stats->status_ppdu_done++;
pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
}
if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
dev_kfree_skb_any(skb);
continue;
}
rcu_read_lock();
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);
if (!peer || !peer->sta) {
ath11k_dbg(ab, ATH11K_DBG_DATA,
"failed to find the peer with peer_id %d\n",
ppdu_info->peer_id);
goto next_skb;
}
arsta = (struct ath11k_sta *)peer->sta->drv_priv;
ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);
if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
next_skb:
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
dev_kfree_skb_any(skb);
memset(ppdu_info, 0, sizeof(*ppdu_info));
ppdu_info->peer_id = HAL_INVALID_PEERID;
}
exit:
return num_buffs_reaped;
}
static u32
ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
void *ring_entry, struct sk_buff **head_msdu,
struct sk_buff **tail_msdu,
struct hal_sw_mon_ring_entries *sw_mon_entries)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = &dp->mon_data;
struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
struct sk_buff *msdu = NULL, *last = NULL;
struct hal_sw_monitor_ring *sw_desc = ring_entry;
struct hal_rx_msdu_list msdu_list;
struct hal_rx_desc *rx_desc;
struct ath11k_skb_rxcb *rxcb;
void *rx_msdu_link_desc;
void *p_buf_addr_info, *p_last_buf_addr_info;
int buf_id, i = 0;
u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset;
u32 rx_bufs_used = 0, msdu_cnt = 0;
u32 total_len = 0, frag_len = 0, sw_cookie;
u16 num_msdus = 0;
u8 rxdma_err, rbm;
bool is_frag, is_first_msdu;
bool drop_mpdu = false;
ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries);
sw_cookie = sw_mon_entries->mon_dst_sw_cookie;
sw_mon_entries->end_of_ppdu = false;
sw_mon_entries->drop_ppdu = false;
p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info;
msdu_cnt = sw_mon_entries->msdu_cnt;
sw_mon_entries->end_of_ppdu =
FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0);
if (sw_mon_entries->end_of_ppdu)
return rx_bufs_used;
if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON,
sw_desc->info0) ==
HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
rxdma_err =
FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE,
sw_desc->info0);
if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
pmon->rx_mon_stats.dest_mpdu_drop++;
drop_mpdu = true;
}
}
is_frag = false;
is_first_msdu = true;
do {
rx_msdu_link_desc =
(u8 *)pmon->link_desc_banks[sw_cookie].vaddr +
(sw_mon_entries->mon_dst_paddr -
pmon->link_desc_banks[sw_cookie].paddr);
ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
&num_msdus);
for (i = 0; i < num_msdus; i++) {
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
msdu_list.sw_cookie[i]);
spin_lock_bh(&rx_ring->idr_lock);
msdu = idr_find(&rx_ring->bufs_idr, buf_id);
if (!msdu) {
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"full mon msdu_pop: invalid buf_id %d\n",
buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
break;
}
idr_remove(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
rxcb = ATH11K_SKB_RXCB(msdu);
if (!rxcb->unmapped) {
dma_unmap_single(ar->ab->dev, rxcb->paddr,
msdu->len +
skb_tailroom(msdu),
DMA_FROM_DEVICE);
rxcb->unmapped = 1;
}
if (drop_mpdu) {
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"full mon: i %d drop msdu %p *ppdu_id %x\n",
i, msdu, sw_mon_entries->ppdu_id);
dev_kfree_skb_any(msdu);
msdu_cnt--;
goto next_msdu;
}
rx_desc = (struct hal_rx_desc *)msdu->data;
rx_pkt_offset = sizeof(struct hal_rx_desc);
l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
if (is_first_msdu) {
if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
drop_mpdu = true;
dev_kfree_skb_any(msdu);
msdu = NULL;
goto next_msdu;
}
is_first_msdu = false;
}
ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
&is_frag, &total_len,
&frag_len, &msdu_cnt);
rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
if (!(*head_msdu))
*head_msdu = msdu;
else if (last)
last->next = msdu;
last = msdu;
next_msdu:
rx_bufs_used++;
}
ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc,
&sw_mon_entries->mon_dst_paddr,
&sw_mon_entries->mon_dst_sw_cookie,
&rbm,
&p_buf_addr_info);
if (ath11k_dp_rx_monitor_link_desc_return(ar,
p_last_buf_addr_info,
dp->mac_id))
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"full mon: dp_rx_monitor_link_desc_return failed\n");
p_last_buf_addr_info = p_buf_addr_info;
} while (sw_mon_entries->mon_dst_paddr && msdu_cnt);
if (last)
last->next = NULL;
*tail_msdu = msdu;
return rx_bufs_used;
}
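/* Illustrative sketch (not driver code) of the MSDU chaining idiom used in
 * the pop routine above: a singly linked sk_buff chain is grown via
 * head/last pointers, and *tail_msdu ends up as the last skb processed,
 * which is why callers check both head and tail before delivering.
 */
static void mon_chain_append_sketch(struct sk_buff **head,
				    struct sk_buff **last,
				    struct sk_buff *msdu)
{
	if (!*head)
		*head = msdu;		/* first MSDU of the chain */
	else if (*last)
		(*last)->next = msdu;	/* append behind the previous MSDU */
	*last = msdu;
}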
static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp,
struct dp_full_mon_mpdu *mon_mpdu,
struct sk_buff *head,
struct sk_buff *tail)
{
mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
if (!mon_mpdu)
return -ENOMEM;
list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list);
mon_mpdu->head = head;
mon_mpdu->tail = tail;
return 0;
}
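/* Note: the allocation above uses GFP_ATOMIC because this helper is called
 * from ath11k_dp_full_mon_process_rx() with pmon->mon_lock (a spinlock)
 * held, where sleeping allocations are not allowed.
 */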
static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp,
struct dp_full_mon_mpdu *mon_mpdu)
{
struct dp_full_mon_mpdu *tmp;
struct sk_buff *tmp_msdu, *skb_next;
if (list_empty(&dp->dp_full_mon_mpdu_list))
return;
list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
list_del(&mon_mpdu->list);
tmp_msdu = mon_mpdu->head;
while (tmp_msdu) {
skb_next = tmp_msdu->next;
dev_kfree_skb_any(tmp_msdu);
tmp_msdu = skb_next;
}
kfree(mon_mpdu);
}
}
static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
int mac_id,
struct ath11k_mon_data *pmon,
struct napi_struct *napi)
{
struct ath11k_pdev_mon_stats *rx_mon_stats;
struct dp_full_mon_mpdu *tmp;
struct dp_full_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
struct sk_buff *head_msdu, *tail_msdu;
struct ath11k_base *ab = ar->ab;
struct ath11k_dp *dp = &ab->dp;
	int ret = 0;
rx_mon_stats = &pmon->rx_mon_stats;
list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
list_del(&mon_mpdu->list);
head_msdu = mon_mpdu->head;
tail_msdu = mon_mpdu->tail;
if (head_msdu && tail_msdu) {
ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
&pmon->mon_ppdu_info,
tail_msdu, napi);
rx_mon_stats->dest_mpdu_done++;
ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");
}
kfree(mon_mpdu);
}
return ret;
}
static int
ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, int budget)
{
struct ath11k *ar = ab->pdevs[mac_id].ar;
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = &dp->mon_data;
struct hal_sw_mon_ring_entries *sw_mon_entries;
int quota = 0, work = 0, count;
sw_mon_entries = &pmon->sw_mon_entries;
while (pmon->hold_mon_dst_ring) {
quota = ath11k_dp_rx_process_mon_status(ab, mac_id,
napi, 1);
if (pmon->buf_state == DP_MON_STATUS_MATCH) {
count = sw_mon_entries->status_buf_count;
if (count > 1) {
quota += ath11k_dp_rx_process_mon_status(ab, mac_id,
napi, count);
}
ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id,
pmon, napi);
pmon->hold_mon_dst_ring = false;
} else if (!pmon->mon_status_paddr ||
pmon->buf_state == DP_MON_STATUS_LEAD) {
sw_mon_entries->drop_ppdu = true;
pmon->hold_mon_dst_ring = false;
}
if (!quota)
break;
work += quota;
}
if (sw_mon_entries->drop_ppdu)
ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp, pmon->mon_mpdu);
return work;
}
static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, int budget)
{
struct ath11k *ar = ab->pdevs[mac_id].ar;
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = &dp->mon_data;
struct hal_sw_mon_ring_entries *sw_mon_entries;
struct ath11k_pdev_mon_stats *rx_mon_stats;
struct sk_buff *head_msdu, *tail_msdu;
void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
void *ring_entry;
u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
int quota = 0, ret;
bool break_dst_ring = false;
spin_lock_bh(&pmon->mon_lock);
sw_mon_entries = &pmon->sw_mon_entries;
rx_mon_stats = &pmon->rx_mon_stats;
if (pmon->hold_mon_dst_ring) {
spin_unlock_bh(&pmon->mon_lock);
goto reap_status_ring;
}
ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
head_msdu = NULL;
tail_msdu = NULL;
mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry,
&head_msdu,
&tail_msdu,
sw_mon_entries);
rx_bufs_used += mpdu_rx_bufs_used;
if (!sw_mon_entries->end_of_ppdu) {
if (head_msdu) {
ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp,
pmon->mon_mpdu,
head_msdu,
tail_msdu);
if (ret)
break_dst_ring = true;
}
goto next_entry;
} else {
if (!sw_mon_entries->ppdu_id &&
!sw_mon_entries->mon_status_paddr) {
break_dst_ring = true;
goto next_entry;
}
}
rx_mon_stats->dest_ppdu_done++;
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
pmon->buf_state = DP_MON_STATUS_LAG;
pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr;
pmon->hold_mon_dst_ring = true;
next_entry:
ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
mon_dst_srng);
if (break_dst_ring)
break;
}
ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
spin_unlock_bh(&pmon->mon_lock);
if (rx_bufs_used) {
ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
&dp->rxdma_mon_buf_ring,
rx_bufs_used,
HAL_RX_BUF_RBM_SW3_BM);
}
reap_status_ring:
quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id,
napi, budget);
return quota;
}
int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, int budget)
{
struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
int ret = 0;
if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
ab->hw_params.full_monitor_mode)
ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
else
ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
return ret;
}
static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
skb_queue_head_init(&pmon->rx_status_q);
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
memset(&pmon->rx_mon_stats, 0,
sizeof(pmon->rx_mon_stats));
return 0;
}
int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = &dp->mon_data;
struct hal_srng *mon_desc_srng = NULL;
struct dp_srng *dp_srng;
int ret = 0;
u32 n_link_desc = 0;
ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
if (ret) {
ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
return ret;
}
/* if rxdma1_enable is false, no need to setup
* rxdma_mon_desc_ring.
*/
if (!ar->ab->hw_params.rxdma1_enable)
return 0;
dp_srng = &dp->rxdma_mon_desc_ring;
n_link_desc = dp_srng->size /
ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC);
mon_desc_srng =
&ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];
ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
n_link_desc);
if (ret) {
ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
return ret;
}
pmon->mon_last_linkdesc_paddr = 0;
pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
spin_lock_init(&pmon->mon_lock);
return 0;
}
static int ath11k_dp_mon_link_free(struct ath11k *ar)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = &dp->mon_data;
ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
HAL_RXDMA_MONITOR_DESC,
&dp->rxdma_mon_desc_ring);
return 0;
}
int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
{
ath11k_dp_mon_link_free(ar);
return 0;
}
int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab)
{
/* start reap timer */
mod_timer(&ab->mon_reap_timer,
jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
return 0;
}
int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer)
{
int ret;
if (stop_timer)
del_timer_sync(&ab->mon_reap_timer);
/* reap all the monitor related rings */
ret = ath11k_dp_purge_mon_ring(ab);
if (ret) {
ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
return ret;
}
return 0;
}
|
linux-master
|
drivers/net/wireless/ath/ath11k/dp_rx.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "peer.h"
#include "debug.h"
static struct ath11k_peer *ath11k_peer_find_list_by_id(struct ath11k_base *ab,
int peer_id)
{
struct ath11k_peer *peer;
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) {
if (peer->peer_id != peer_id)
continue;
return peer;
}
return NULL;
}
struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
const u8 *addr)
{
struct ath11k_peer *peer;
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) {
if (peer->vdev_id != vdev_id)
continue;
if (!ether_addr_equal(peer->addr, addr))
continue;
return peer;
}
return NULL;
}
struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
const u8 *addr)
{
struct ath11k_peer *peer;
lockdep_assert_held(&ab->base_lock);
if (!ab->rhead_peer_addr)
return NULL;
peer = rhashtable_lookup_fast(ab->rhead_peer_addr, addr,
ab->rhash_peer_addr_param);
return peer;
}
struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
int peer_id)
{
struct ath11k_peer *peer;
lockdep_assert_held(&ab->base_lock);
if (!ab->rhead_peer_id)
return NULL;
peer = rhashtable_lookup_fast(ab->rhead_peer_id, &peer_id,
ab->rhash_peer_id_param);
return peer;
}
struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
int vdev_id)
{
struct ath11k_peer *peer;
spin_lock_bh(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) {
if (vdev_id == peer->vdev_id) {
spin_unlock_bh(&ab->base_lock);
return peer;
}
}
spin_unlock_bh(&ab->base_lock);
return NULL;
}
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
{
struct ath11k_peer *peer;
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_list_by_id(ab, peer_id);
if (!peer) {
ath11k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
peer_id);
goto exit;
}
ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "peer unmap vdev %d peer %pM id %d\n",
peer->vdev_id, peer->addr, peer_id);
list_del(&peer->list);
kfree(peer);
wake_up(&ab->peer_mapping_wq);
exit:
spin_unlock_bh(&ab->base_lock);
}
void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
{
struct ath11k_peer *peer;
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, vdev_id, mac_addr);
if (!peer) {
peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
if (!peer)
goto exit;
peer->vdev_id = vdev_id;
peer->peer_id = peer_id;
peer->ast_hash = ast_hash;
peer->hw_peer_id = hw_peer_id;
ether_addr_copy(peer->addr, mac_addr);
list_add(&peer->list, &ab->peers);
wake_up(&ab->peer_mapping_wq);
}
ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "peer map vdev %d peer %pM id %d\n",
vdev_id, mac_addr, peer_id);
exit:
spin_unlock_bh(&ab->base_lock);
}
static int ath11k_wait_for_peer_common(struct ath11k_base *ab, int vdev_id,
const u8 *addr, bool expect_mapped)
{
int ret;
ret = wait_event_timeout(ab->peer_mapping_wq, ({
bool mapped;
spin_lock_bh(&ab->base_lock);
mapped = !!ath11k_peer_find(ab, vdev_id, addr);
spin_unlock_bh(&ab->base_lock);
(mapped == expect_mapped ||
test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags));
}), 3 * HZ);
if (ret <= 0)
return -ETIMEDOUT;
return 0;
}
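/* Note on the helper above: wait_event_timeout() returns 0 if the timeout
 * elapsed with the condition still false and a positive value (the
 * remaining jiffies) otherwise, so mapping "ret <= 0" to -ETIMEDOUT is the
 * usual translation. The condition block re-checks the peer list under
 * ab->base_lock on every wakeup.
 */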
static inline int ath11k_peer_rhash_insert(struct ath11k_base *ab,
struct rhashtable *rtbl,
struct rhash_head *rhead,
struct rhashtable_params *params,
void *key)
{
struct ath11k_peer *tmp;
lockdep_assert_held(&ab->tbl_mtx_lock);
tmp = rhashtable_lookup_get_insert_fast(rtbl, rhead, *params);
if (!tmp)
return 0;
else if (IS_ERR(tmp))
return PTR_ERR(tmp);
else
return -EEXIST;
}
static inline int ath11k_peer_rhash_remove(struct ath11k_base *ab,
struct rhashtable *rtbl,
struct rhash_head *rhead,
struct rhashtable_params *params)
{
int ret;
lockdep_assert_held(&ab->tbl_mtx_lock);
ret = rhashtable_remove_fast(rtbl, rhead, *params);
if (ret && ret != -ENOENT)
return ret;
return 0;
}
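/* Note (illustrative): rhashtable_remove_fast() returns -ENOENT when the
 * object is not present in the table; the helper above deliberately treats
 * that as success so removal stays idempotent.
 */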
static int ath11k_peer_rhash_add(struct ath11k_base *ab, struct ath11k_peer *peer)
{
int ret;
lockdep_assert_held(&ab->base_lock);
lockdep_assert_held(&ab->tbl_mtx_lock);
if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
return -EPERM;
ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_id, &peer->rhash_id,
&ab->rhash_peer_id_param, &peer->peer_id);
if (ret) {
ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_id ret %d\n",
peer->addr, peer->peer_id, ret);
return ret;
}
ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_addr, &peer->rhash_addr,
&ab->rhash_peer_addr_param, &peer->addr);
if (ret) {
ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_addr ret %d\n",
peer->addr, peer->peer_id, ret);
goto err_clean;
}
return 0;
err_clean:
ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
&ab->rhash_peer_id_param);
return ret;
}
void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
{
struct ath11k_peer *peer, *tmp;
struct ath11k_base *ab = ar->ab;
lockdep_assert_held(&ar->conf_mutex);
mutex_lock(&ab->tbl_mtx_lock);
spin_lock_bh(&ab->base_lock);
list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
if (peer->vdev_id != vdev_id)
continue;
ath11k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
peer->addr, vdev_id);
ath11k_peer_rhash_delete(ab, peer);
list_del(&peer->list);
kfree(peer);
ar->num_peers--;
}
spin_unlock_bh(&ab->base_lock);
mutex_unlock(&ab->tbl_mtx_lock);
}
static int ath11k_wait_for_peer_deleted(struct ath11k *ar, int vdev_id, const u8 *addr)
{
return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
}
int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
const u8 *addr)
{
int ret;
unsigned long time_left;
ret = ath11k_wait_for_peer_deleted(ar, vdev_id, addr);
if (ret) {
ath11k_warn(ar->ab, "failed wait for peer deleted");
return ret;
}
time_left = wait_for_completion_timeout(&ar->peer_delete_done,
3 * HZ);
if (time_left == 0) {
ath11k_warn(ar->ab, "Timeout in receiving peer delete response\n");
return -ETIMEDOUT;
}
return 0;
}
static int __ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, const u8 *addr)
{
int ret;
struct ath11k_peer *peer;
struct ath11k_base *ab = ar->ab;
lockdep_assert_held(&ar->conf_mutex);
mutex_lock(&ab->tbl_mtx_lock);
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_addr(ab, addr);
	/* Check if the found peer is the one we want to remove. While a
	 * station is transitioning to another band we may briefly have two
	 * peers with the same address assigned to different vdev_ids.
	 * Make sure we are deleting the correct peer.
	 */
if (peer && peer->vdev_id == vdev_id)
ath11k_peer_rhash_delete(ab, peer);
	/* Fall back to a peer list search if the correct peer can't be
	 * found. Skip removing the peer from the rhash in that case, since
	 * it was already removed during peer add.
	 */
if (!peer)
peer = ath11k_peer_find(ab, vdev_id, addr);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
mutex_unlock(&ab->tbl_mtx_lock);
ath11k_warn(ab,
"failed to find peer vdev_id %d addr %pM in delete\n",
vdev_id, addr);
return -EINVAL;
}
spin_unlock_bh(&ab->base_lock);
mutex_unlock(&ab->tbl_mtx_lock);
reinit_completion(&ar->peer_delete_done);
ret = ath11k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
if (ret) {
ath11k_warn(ab,
"failed to delete peer vdev_id %d addr %pM ret %d\n",
vdev_id, addr, ret);
return ret;
}
ret = ath11k_wait_for_peer_delete_done(ar, vdev_id, addr);
if (ret)
return ret;
return 0;
}
int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
ret = __ath11k_peer_delete(ar, vdev_id, addr);
if (ret)
return ret;
ar->num_peers--;
return 0;
}
static int ath11k_wait_for_peer_created(struct ath11k *ar, int vdev_id, const u8 *addr)
{
return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
}
int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
struct ieee80211_sta *sta, struct peer_create_params *param)
{
struct ath11k_peer *peer;
struct ath11k_sta *arsta;
int ret, fbret;
lockdep_assert_held(&ar->conf_mutex);
if (ar->num_peers > (ar->max_num_peers - 1)) {
ath11k_warn(ar->ab,
"failed to create peer due to insufficient peer entry resource in firmware\n");
return -ENOBUFS;
}
mutex_lock(&ar->ab->tbl_mtx_lock);
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find_by_addr(ar->ab, param->peer_addr);
if (peer) {
if (peer->vdev_id == param->vdev_id) {
spin_unlock_bh(&ar->ab->base_lock);
mutex_unlock(&ar->ab->tbl_mtx_lock);
return -EINVAL;
}
		/* Assume the sta is transitioning to another band and
		 * remove the stale peer entry from the rhash tables here.
		 */
ath11k_peer_rhash_delete(ar->ab, peer);
}
spin_unlock_bh(&ar->ab->base_lock);
mutex_unlock(&ar->ab->tbl_mtx_lock);
ret = ath11k_wmi_send_peer_create_cmd(ar, param);
if (ret) {
ath11k_warn(ar->ab,
"failed to send peer create vdev_id %d ret %d\n",
param->vdev_id, ret);
return ret;
}
ret = ath11k_wait_for_peer_created(ar, param->vdev_id,
param->peer_addr);
if (ret)
return ret;
mutex_lock(&ar->ab->tbl_mtx_lock);
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
if (!peer) {
spin_unlock_bh(&ar->ab->base_lock);
mutex_unlock(&ar->ab->tbl_mtx_lock);
ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
param->peer_addr, param->vdev_id);
ret = -ENOENT;
goto cleanup;
}
ret = ath11k_peer_rhash_add(ar->ab, peer);
if (ret) {
spin_unlock_bh(&ar->ab->base_lock);
mutex_unlock(&ar->ab->tbl_mtx_lock);
goto cleanup;
}
peer->pdev_idx = ar->pdev_idx;
peer->sta = sta;
if (arvif->vif->type == NL80211_IFTYPE_STATION) {
arvif->ast_hash = peer->ast_hash;
arvif->ast_idx = peer->hw_peer_id;
}
peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
if (sta) {
arsta = (struct ath11k_sta *)sta->drv_priv;
arsta->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 0) |
FIELD_PREP(HTT_TCL_META_DATA_PEER_ID,
peer->peer_id);
/* set HTT extension valid bit to 0 by default */
arsta->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
}
ar->num_peers++;
spin_unlock_bh(&ar->ab->base_lock);
mutex_unlock(&ar->ab->tbl_mtx_lock);
return 0;
cleanup:
fbret = __ath11k_peer_delete(ar, param->vdev_id, param->peer_addr);
if (fbret)
ath11k_warn(ar->ab, "failed peer %pM delete vdev_id %d fallback ret %d\n",
param->peer_addr, param->vdev_id, fbret);
return ret;
}
int ath11k_peer_rhash_delete(struct ath11k_base *ab, struct ath11k_peer *peer)
{
int ret;
lockdep_assert_held(&ab->base_lock);
lockdep_assert_held(&ab->tbl_mtx_lock);
if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
return -EPERM;
ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_addr, &peer->rhash_addr,
&ab->rhash_peer_addr_param);
if (ret) {
ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_addr ret %d\n",
peer->addr, peer->peer_id, ret);
return ret;
}
ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
&ab->rhash_peer_id_param);
if (ret) {
ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_id ret %d\n",
peer->addr, peer->peer_id, ret);
return ret;
}
return 0;
}
static int ath11k_peer_rhash_id_tbl_init(struct ath11k_base *ab)
{
struct rhashtable_params *param;
struct rhashtable *rhash_id_tbl;
int ret;
size_t size;
lockdep_assert_held(&ab->tbl_mtx_lock);
if (ab->rhead_peer_id)
return 0;
size = sizeof(*ab->rhead_peer_id);
rhash_id_tbl = kzalloc(size, GFP_KERNEL);
if (!rhash_id_tbl) {
ath11k_warn(ab, "failed to init rhash id table due to no mem (size %zu)\n",
size);
return -ENOMEM;
}
param = &ab->rhash_peer_id_param;
param->key_offset = offsetof(struct ath11k_peer, peer_id);
param->head_offset = offsetof(struct ath11k_peer, rhash_id);
param->key_len = sizeof_field(struct ath11k_peer, peer_id);
param->automatic_shrinking = true;
param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);
ret = rhashtable_init(rhash_id_tbl, param);
if (ret) {
ath11k_warn(ab, "failed to init peer id rhash table %d\n", ret);
goto err_free;
}
spin_lock_bh(&ab->base_lock);
if (!ab->rhead_peer_id) {
ab->rhead_peer_id = rhash_id_tbl;
} else {
spin_unlock_bh(&ab->base_lock);
goto cleanup_tbl;
}
spin_unlock_bh(&ab->base_lock);
return 0;
cleanup_tbl:
rhashtable_destroy(rhash_id_tbl);
err_free:
kfree(rhash_id_tbl);
return ret;
}
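/* Minimal sketch of the rhashtable setup pattern used above, with
 * hypothetical struct and field names: key_offset/key_len describe where
 * the lookup key lives inside the object, and head_offset points at the
 * embedded struct rhash_head used for hash linkage.
 */
struct example_obj {
	int id;				/* lookup key */
	struct rhash_head node;		/* hash table linkage */
};

static const struct rhashtable_params example_params = {
	.key_offset = offsetof(struct example_obj, id),
	.key_len = sizeof_field(struct example_obj, id),
	.head_offset = offsetof(struct example_obj, node),
	.automatic_shrinking = true,
};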
static int ath11k_peer_rhash_addr_tbl_init(struct ath11k_base *ab)
{
struct rhashtable_params *param;
struct rhashtable *rhash_addr_tbl;
int ret;
size_t size;
lockdep_assert_held(&ab->tbl_mtx_lock);
if (ab->rhead_peer_addr)
return 0;
size = sizeof(*ab->rhead_peer_addr);
rhash_addr_tbl = kzalloc(size, GFP_KERNEL);
if (!rhash_addr_tbl) {
ath11k_warn(ab, "failed to init rhash addr table due to no mem (size %zu)\n",
size);
return -ENOMEM;
}
param = &ab->rhash_peer_addr_param;
param->key_offset = offsetof(struct ath11k_peer, addr);
param->head_offset = offsetof(struct ath11k_peer, rhash_addr);
param->key_len = sizeof_field(struct ath11k_peer, addr);
param->automatic_shrinking = true;
param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);
ret = rhashtable_init(rhash_addr_tbl, param);
if (ret) {
ath11k_warn(ab, "failed to init peer addr rhash table %d\n", ret);
goto err_free;
}
spin_lock_bh(&ab->base_lock);
if (!ab->rhead_peer_addr) {
ab->rhead_peer_addr = rhash_addr_tbl;
} else {
spin_unlock_bh(&ab->base_lock);
goto cleanup_tbl;
}
spin_unlock_bh(&ab->base_lock);
return 0;
cleanup_tbl:
rhashtable_destroy(rhash_addr_tbl);
err_free:
kfree(rhash_addr_tbl);
return ret;
}
static inline void ath11k_peer_rhash_id_tbl_destroy(struct ath11k_base *ab)
{
lockdep_assert_held(&ab->tbl_mtx_lock);
if (!ab->rhead_peer_id)
return;
rhashtable_destroy(ab->rhead_peer_id);
kfree(ab->rhead_peer_id);
ab->rhead_peer_id = NULL;
}
static inline void ath11k_peer_rhash_addr_tbl_destroy(struct ath11k_base *ab)
{
lockdep_assert_held(&ab->tbl_mtx_lock);
if (!ab->rhead_peer_addr)
return;
rhashtable_destroy(ab->rhead_peer_addr);
kfree(ab->rhead_peer_addr);
ab->rhead_peer_addr = NULL;
}
int ath11k_peer_rhash_tbl_init(struct ath11k_base *ab)
{
int ret;
mutex_lock(&ab->tbl_mtx_lock);
ret = ath11k_peer_rhash_id_tbl_init(ab);
if (ret)
goto out;
ret = ath11k_peer_rhash_addr_tbl_init(ab);
if (ret)
goto cleanup_tbl;
mutex_unlock(&ab->tbl_mtx_lock);
return 0;
cleanup_tbl:
ath11k_peer_rhash_id_tbl_destroy(ab);
out:
mutex_unlock(&ab->tbl_mtx_lock);
return ret;
}
void ath11k_peer_rhash_tbl_destroy(struct ath11k_base *ab)
{
mutex_lock(&ab->tbl_mtx_lock);
ath11k_peer_rhash_addr_tbl_destroy(ab);
ath11k_peer_rhash_id_tbl_destroy(ab);
mutex_unlock(&ab->tbl_mtx_lock);
}
|
linux-master
|
drivers/net/wireless/ath/ath11k/peer.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include <linux/vmalloc.h>
#include "debugfs_sta.h"
#include "core.h"
#include "peer.h"
#include "debug.h"
#include "dp_tx.h"
#include "debugfs_htt_stats.h"
void ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta,
struct ath11k_per_peer_tx_stats *peer_stats,
u8 legacy_rate_idx)
{
struct rate_info *txrate = &arsta->txrate;
struct ath11k_htt_tx_stats *tx_stats;
int gi, mcs, bw, nss;
if (!arsta->tx_stats)
return;
tx_stats = arsta->tx_stats;
gi = FIELD_GET(RATE_INFO_FLAGS_SHORT_GI, arsta->txrate.flags);
mcs = txrate->mcs;
bw = ath11k_mac_mac80211_bw_to_ath11k_bw(txrate->bw);
nss = txrate->nss - 1;
#define STATS_OP_FMT(name) tx_stats->stats[ATH11K_STATS_TYPE_##name]
if (txrate->flags & RATE_INFO_FLAGS_HE_MCS) {
STATS_OP_FMT(SUCC).he[0][mcs] += peer_stats->succ_bytes;
STATS_OP_FMT(SUCC).he[1][mcs] += peer_stats->succ_pkts;
STATS_OP_FMT(FAIL).he[0][mcs] += peer_stats->failed_bytes;
STATS_OP_FMT(FAIL).he[1][mcs] += peer_stats->failed_pkts;
STATS_OP_FMT(RETRY).he[0][mcs] += peer_stats->retry_bytes;
STATS_OP_FMT(RETRY).he[1][mcs] += peer_stats->retry_pkts;
} else if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
STATS_OP_FMT(SUCC).vht[0][mcs] += peer_stats->succ_bytes;
STATS_OP_FMT(SUCC).vht[1][mcs] += peer_stats->succ_pkts;
STATS_OP_FMT(FAIL).vht[0][mcs] += peer_stats->failed_bytes;
STATS_OP_FMT(FAIL).vht[1][mcs] += peer_stats->failed_pkts;
STATS_OP_FMT(RETRY).vht[0][mcs] += peer_stats->retry_bytes;
STATS_OP_FMT(RETRY).vht[1][mcs] += peer_stats->retry_pkts;
} else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
STATS_OP_FMT(SUCC).ht[0][mcs] += peer_stats->succ_bytes;
STATS_OP_FMT(SUCC).ht[1][mcs] += peer_stats->succ_pkts;
STATS_OP_FMT(FAIL).ht[0][mcs] += peer_stats->failed_bytes;
STATS_OP_FMT(FAIL).ht[1][mcs] += peer_stats->failed_pkts;
STATS_OP_FMT(RETRY).ht[0][mcs] += peer_stats->retry_bytes;
STATS_OP_FMT(RETRY).ht[1][mcs] += peer_stats->retry_pkts;
} else {
mcs = legacy_rate_idx;
STATS_OP_FMT(SUCC).legacy[0][mcs] += peer_stats->succ_bytes;
STATS_OP_FMT(SUCC).legacy[1][mcs] += peer_stats->succ_pkts;
STATS_OP_FMT(FAIL).legacy[0][mcs] += peer_stats->failed_bytes;
STATS_OP_FMT(FAIL).legacy[1][mcs] += peer_stats->failed_pkts;
STATS_OP_FMT(RETRY).legacy[0][mcs] += peer_stats->retry_bytes;
STATS_OP_FMT(RETRY).legacy[1][mcs] += peer_stats->retry_pkts;
}
if (peer_stats->is_ampdu) {
tx_stats->ba_fails += peer_stats->ba_fails;
if (txrate->flags & RATE_INFO_FLAGS_HE_MCS) {
STATS_OP_FMT(AMPDU).he[0][mcs] +=
peer_stats->succ_bytes + peer_stats->retry_bytes;
STATS_OP_FMT(AMPDU).he[1][mcs] +=
peer_stats->succ_pkts + peer_stats->retry_pkts;
} else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
STATS_OP_FMT(AMPDU).ht[0][mcs] +=
peer_stats->succ_bytes + peer_stats->retry_bytes;
STATS_OP_FMT(AMPDU).ht[1][mcs] +=
peer_stats->succ_pkts + peer_stats->retry_pkts;
} else {
STATS_OP_FMT(AMPDU).vht[0][mcs] +=
peer_stats->succ_bytes + peer_stats->retry_bytes;
STATS_OP_FMT(AMPDU).vht[1][mcs] +=
peer_stats->succ_pkts + peer_stats->retry_pkts;
}
STATS_OP_FMT(AMPDU).bw[0][bw] +=
peer_stats->succ_bytes + peer_stats->retry_bytes;
STATS_OP_FMT(AMPDU).nss[0][nss] +=
peer_stats->succ_bytes + peer_stats->retry_bytes;
STATS_OP_FMT(AMPDU).gi[0][gi] +=
peer_stats->succ_bytes + peer_stats->retry_bytes;
STATS_OP_FMT(AMPDU).bw[1][bw] +=
peer_stats->succ_pkts + peer_stats->retry_pkts;
STATS_OP_FMT(AMPDU).nss[1][nss] +=
peer_stats->succ_pkts + peer_stats->retry_pkts;
STATS_OP_FMT(AMPDU).gi[1][gi] +=
peer_stats->succ_pkts + peer_stats->retry_pkts;
} else {
tx_stats->ack_fails += peer_stats->ba_fails;
}
STATS_OP_FMT(SUCC).bw[0][bw] += peer_stats->succ_bytes;
STATS_OP_FMT(SUCC).nss[0][nss] += peer_stats->succ_bytes;
STATS_OP_FMT(SUCC).gi[0][gi] += peer_stats->succ_bytes;
STATS_OP_FMT(SUCC).bw[1][bw] += peer_stats->succ_pkts;
STATS_OP_FMT(SUCC).nss[1][nss] += peer_stats->succ_pkts;
STATS_OP_FMT(SUCC).gi[1][gi] += peer_stats->succ_pkts;
STATS_OP_FMT(FAIL).bw[0][bw] += peer_stats->failed_bytes;
STATS_OP_FMT(FAIL).nss[0][nss] += peer_stats->failed_bytes;
STATS_OP_FMT(FAIL).gi[0][gi] += peer_stats->failed_bytes;
STATS_OP_FMT(FAIL).bw[1][bw] += peer_stats->failed_pkts;
STATS_OP_FMT(FAIL).nss[1][nss] += peer_stats->failed_pkts;
STATS_OP_FMT(FAIL).gi[1][gi] += peer_stats->failed_pkts;
STATS_OP_FMT(RETRY).bw[0][bw] += peer_stats->retry_bytes;
STATS_OP_FMT(RETRY).nss[0][nss] += peer_stats->retry_bytes;
STATS_OP_FMT(RETRY).gi[0][gi] += peer_stats->retry_bytes;
STATS_OP_FMT(RETRY).bw[1][bw] += peer_stats->retry_pkts;
STATS_OP_FMT(RETRY).nss[1][nss] += peer_stats->retry_pkts;
STATS_OP_FMT(RETRY).gi[1][gi] += peer_stats->retry_pkts;
tx_stats->tx_duration += peer_stats->duration;
}
void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar,
struct hal_tx_status *ts)
{
ath11k_dp_tx_update_txcompl(ar, ts);
}
static ssize_t ath11k_dbg_sta_dump_tx_stats(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
struct ath11k_htt_data_stats *stats;
static const char *str_name[ATH11K_STATS_TYPE_MAX] = {"succ", "fail",
"retry", "ampdu"};
static const char *str[ATH11K_COUNTER_TYPE_MAX] = {"bytes", "packets"};
int len = 0, i, j, k, retval = 0;
const int size = 2 * 4096;
char *buf;
if (!arsta->tx_stats)
return -ENOENT;
buf = kzalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
for (k = 0; k < ATH11K_STATS_TYPE_MAX; k++) {
for (j = 0; j < ATH11K_COUNTER_TYPE_MAX; j++) {
stats = &arsta->tx_stats->stats[k];
len += scnprintf(buf + len, size - len, "%s_%s\n",
str_name[k],
str[j]);
len += scnprintf(buf + len, size - len,
" HE MCS %s\n",
str[j]);
for (i = 0; i < ATH11K_HE_MCS_NUM; i++)
len += scnprintf(buf + len, size - len,
" %llu ",
stats->he[j][i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len,
" VHT MCS %s\n",
str[j]);
for (i = 0; i < ATH11K_VHT_MCS_NUM; i++)
len += scnprintf(buf + len, size - len,
" %llu ",
stats->vht[j][i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len, " HT MCS %s\n",
str[j]);
for (i = 0; i < ATH11K_HT_MCS_NUM; i++)
len += scnprintf(buf + len, size - len,
" %llu ", stats->ht[j][i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len,
" BW %s (20,40,80,160 MHz)\n", str[j]);
len += scnprintf(buf + len, size - len,
" %llu %llu %llu %llu\n",
stats->bw[j][0], stats->bw[j][1],
stats->bw[j][2], stats->bw[j][3]);
len += scnprintf(buf + len, size - len,
" NSS %s (1x1,2x2,3x3,4x4)\n", str[j]);
len += scnprintf(buf + len, size - len,
" %llu %llu %llu %llu\n",
stats->nss[j][0], stats->nss[j][1],
stats->nss[j][2], stats->nss[j][3]);
len += scnprintf(buf + len, size - len,
" GI %s (0.4us,0.8us,1.6us,3.2us)\n",
str[j]);
len += scnprintf(buf + len, size - len,
" %llu %llu %llu %llu\n",
stats->gi[j][0], stats->gi[j][1],
stats->gi[j][2], stats->gi[j][3]);
len += scnprintf(buf + len, size - len,
" legacy rate %s (1,2 ... Mbps)\n ",
str[j]);
for (i = 0; i < ATH11K_LEGACY_NUM; i++)
len += scnprintf(buf + len, size - len, "%llu ",
stats->legacy[j][i]);
len += scnprintf(buf + len, size - len, "\n");
}
}
len += scnprintf(buf + len, size - len,
"\nTX duration\n %llu usecs\n",
arsta->tx_stats->tx_duration);
len += scnprintf(buf + len, size - len,
"BA fails\n %llu\n", arsta->tx_stats->ba_fails);
len += scnprintf(buf + len, size - len,
"ack fails\n %llu\n", arsta->tx_stats->ack_fails);
spin_unlock_bh(&ar->data_lock);
if (len > size)
len = size;
retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
mutex_unlock(&ar->conf_mutex);
return retval;
}
static const struct file_operations fops_tx_stats = {
.read = ath11k_dbg_sta_dump_tx_stats,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
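/* Minimal sketch of the read-only debugfs file pattern repeated throughout
 * this file (names are hypothetical): format into a kernel buffer, then let
 * simple_read_from_buffer() handle partial reads and *ppos bookkeeping.
 */
static ssize_t example_read(struct file *file, char __user *ubuf,
			    size_t count, loff_t *ppos)
{
	char buf[32];
	int len = scnprintf(buf, sizeof(buf), "%d\n", 42);

	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}

static const struct file_operations fops_example = {
	.read = example_read,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};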
static ssize_t ath11k_dbg_sta_dump_rx_stats(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
int len = 0, i, retval = 0;
const int size = 4096;
char *buf;
if (!rx_stats)
return -ENOENT;
buf = kzalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->ab->base_lock);
len += scnprintf(buf + len, size - len, "RX peer stats:\n");
len += scnprintf(buf + len, size - len, "Num of MSDUs: %llu\n",
rx_stats->num_msdu);
len += scnprintf(buf + len, size - len, "Num of MSDUs with TCP L4: %llu\n",
rx_stats->tcp_msdu_count);
len += scnprintf(buf + len, size - len, "Num of MSDUs with UDP L4: %llu\n",
rx_stats->udp_msdu_count);
len += scnprintf(buf + len, size - len, "Num of MSDUs part of AMPDU: %llu\n",
rx_stats->ampdu_msdu_count);
len += scnprintf(buf + len, size - len, "Num of MSDUs not part of AMPDU: %llu\n",
rx_stats->non_ampdu_msdu_count);
len += scnprintf(buf + len, size - len, "Num of MSDUs using STBC: %llu\n",
rx_stats->stbc_count);
len += scnprintf(buf + len, size - len, "Num of MSDUs beamformed: %llu\n",
rx_stats->beamformed_count);
len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS ok: %llu\n",
rx_stats->num_mpdu_fcs_ok);
len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS error: %llu\n",
rx_stats->num_mpdu_fcs_err);
len += scnprintf(buf + len, size - len,
"GI: 0.8us %llu 0.4us %llu 1.6us %llu 3.2us %llu\n",
rx_stats->gi_count[0], rx_stats->gi_count[1],
rx_stats->gi_count[2], rx_stats->gi_count[3]);
len += scnprintf(buf + len, size - len,
"BW: 20Mhz %llu 40Mhz %llu 80Mhz %llu 160Mhz %llu\n",
rx_stats->bw_count[0], rx_stats->bw_count[1],
rx_stats->bw_count[2], rx_stats->bw_count[3]);
len += scnprintf(buf + len, size - len, "BCC %llu LDPC %llu\n",
rx_stats->coding_count[0], rx_stats->coding_count[1]);
len += scnprintf(buf + len, size - len,
"preamble: 11A %llu 11B %llu 11N %llu 11AC %llu 11AX %llu\n",
rx_stats->pream_cnt[0], rx_stats->pream_cnt[1],
rx_stats->pream_cnt[2], rx_stats->pream_cnt[3],
rx_stats->pream_cnt[4]);
len += scnprintf(buf + len, size - len,
"reception type: SU %llu MU_MIMO %llu MU_OFDMA %llu MU_OFDMA_MIMO %llu\n",
rx_stats->reception_type[0], rx_stats->reception_type[1],
rx_stats->reception_type[2], rx_stats->reception_type[3]);
len += scnprintf(buf + len, size - len, "TID(0-15) Legacy TID(16):");
for (i = 0; i <= IEEE80211_NUM_TIDS; i++)
len += scnprintf(buf + len, size - len, "%llu ", rx_stats->tid_count[i]);
len += scnprintf(buf + len, size - len, "\nMCS(0-11) Legacy MCS(12):");
for (i = 0; i < HAL_RX_MAX_MCS + 1; i++)
len += scnprintf(buf + len, size - len, "%llu ", rx_stats->mcs_count[i]);
len += scnprintf(buf + len, size - len, "\nNSS(1-8):");
for (i = 0; i < HAL_RX_MAX_NSS; i++)
len += scnprintf(buf + len, size - len, "%llu ", rx_stats->nss_count[i]);
len += scnprintf(buf + len, size - len, "\nRX Duration:%llu ",
rx_stats->rx_duration);
len += scnprintf(buf + len, size - len,
"\nDCM: %llu\nRU: 26 %llu 52: %llu 106: %llu 242: %llu 484: %llu 996: %llu\n",
rx_stats->dcm_count, rx_stats->ru_alloc_cnt[0],
rx_stats->ru_alloc_cnt[1], rx_stats->ru_alloc_cnt[2],
rx_stats->ru_alloc_cnt[3], rx_stats->ru_alloc_cnt[4],
rx_stats->ru_alloc_cnt[5]);
len += scnprintf(buf + len, size - len, "\n");
spin_unlock_bh(&ar->ab->base_lock);
if (len > size)
len = size;
retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
mutex_unlock(&ar->conf_mutex);
return retval;
}
static const struct file_operations fops_rx_stats = {
.read = ath11k_dbg_sta_dump_rx_stats,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static int
ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file)
{
struct ieee80211_sta *sta = inode->i_private;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
struct debug_htt_stats_req *stats_req;
int type = ar->debug.htt_stats.type;
int ret;
if ((type != ATH11K_DBG_HTT_EXT_STATS_PEER_INFO &&
type != ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS) ||
type == ATH11K_DBG_HTT_EXT_STATS_RESET)
return -EPERM;
stats_req = vzalloc(sizeof(*stats_req) + ATH11K_HTT_STATS_BUF_SIZE);
if (!stats_req)
return -ENOMEM;
mutex_lock(&ar->conf_mutex);
ar->debug.htt_stats.stats_req = stats_req;
stats_req->type = type;
memcpy(stats_req->peer_addr, sta->addr, ETH_ALEN);
ret = ath11k_debugfs_htt_stats_req(ar);
mutex_unlock(&ar->conf_mutex);
if (ret < 0)
goto out;
file->private_data = stats_req;
return 0;
out:
vfree(stats_req);
ar->debug.htt_stats.stats_req = NULL;
return ret;
}
static int
ath11k_dbg_sta_release_htt_peer_stats(struct inode *inode, struct file *file)
{
struct ieee80211_sta *sta = inode->i_private;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
mutex_lock(&ar->conf_mutex);
vfree(file->private_data);
ar->debug.htt_stats.stats_req = NULL;
mutex_unlock(&ar->conf_mutex);
return 0;
}
static ssize_t ath11k_dbg_sta_read_htt_peer_stats(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct debug_htt_stats_req *stats_req = file->private_data;
char *buf;
u32 length = 0;
buf = stats_req->buf;
length = min_t(u32, stats_req->buf_len, ATH11K_HTT_STATS_BUF_SIZE);
return simple_read_from_buffer(user_buf, count, ppos, buf, length);
}
static const struct file_operations fops_htt_peer_stats = {
.open = ath11k_dbg_sta_open_htt_peer_stats,
.release = ath11k_dbg_sta_release_htt_peer_stats,
.read = ath11k_dbg_sta_read_htt_peer_stats,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath11k_dbg_sta_write_peer_pktlog(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
int ret, enable;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON) {
ret = -ENETDOWN;
goto out;
}
ret = kstrtoint_from_user(buf, count, 0, &enable);
if (ret)
goto out;
ar->debug.pktlog_peer_valid = enable;
memcpy(ar->debug.pktlog_peer_addr, sta->addr, ETH_ALEN);
/* Send peer based pktlog enable/disable */
ret = ath11k_wmi_pdev_peer_pktlog_filter(ar, sta->addr, enable);
if (ret) {
ath11k_warn(ar->ab, "failed to set peer pktlog filter %pM: %d\n",
sta->addr, ret);
goto out;
}
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "peer pktlog filter set to %d\n",
enable);
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static ssize_t ath11k_dbg_sta_read_peer_pktlog(struct file *file,
char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
char buf[32] = {0};
int len;
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf), "%08x %pM\n",
ar->debug.pktlog_peer_valid,
ar->debug.pktlog_peer_addr);
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}
static const struct file_operations fops_peer_pktlog = {
.write = ath11k_dbg_sta_write_peer_pktlog,
.read = ath11k_dbg_sta_read_peer_pktlog,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath11k_dbg_sta_write_delba(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
u32 tid, initiator, reason;
int ret;
char buf[64] = {0};
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
if (ret <= 0)
return ret;
ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason);
if (ret != 3)
return -EINVAL;
/* Valid TID values are 0 through 15 */
if (tid > HAL_DESC_REO_NON_QOS_TID - 1)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON ||
arsta->aggr_mode != ATH11K_DBG_AGGR_MODE_MANUAL) {
ret = count;
goto out;
}
ret = ath11k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr,
tid, initiator, reason);
if (ret) {
ath11k_warn(ar->ab, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n",
arsta->arvif->vdev_id, sta->addr, tid, initiator,
reason);
}
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_delba = {
.write = ath11k_dbg_sta_write_delba,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath11k_dbg_sta_write_addba_resp(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
u32 tid, status;
int ret;
char buf[64] = {0};
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
if (ret <= 0)
return ret;
ret = sscanf(buf, "%u %u", &tid, &status);
if (ret != 2)
return -EINVAL;
/* Valid TID values are 0 through 15 */
if (tid > HAL_DESC_REO_NON_QOS_TID - 1)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON ||
arsta->aggr_mode != ATH11K_DBG_AGGR_MODE_MANUAL) {
ret = count;
goto out;
}
ret = ath11k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr,
tid, status);
if (ret) {
ath11k_warn(ar->ab, "failed to send addba response: vdev_id %u peer %pM tid %u status%u\n",
arsta->arvif->vdev_id, sta->addr, tid, status);
}
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_addba_resp = {
.write = ath11k_dbg_sta_write_addba_resp,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath11k_dbg_sta_write_addba(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
u32 tid, buf_size;
int ret;
char buf[64] = {0};
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
if (ret <= 0)
return ret;
ret = sscanf(buf, "%u %u", &tid, &buf_size);
if (ret != 2)
return -EINVAL;
/* Valid TID values are 0 through 15 */
if (tid > HAL_DESC_REO_NON_QOS_TID - 1)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON ||
arsta->aggr_mode != ATH11K_DBG_AGGR_MODE_MANUAL) {
ret = count;
goto out;
}
ret = ath11k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr,
tid, buf_size);
if (ret) {
ath11k_warn(ar->ab, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n",
arsta->arvif->vdev_id, sta->addr, tid, buf_size);
}
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_addba = {
.write = ath11k_dbg_sta_write_addba,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath11k_dbg_sta_read_aggr_mode(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
char buf[64];
int len = 0;
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf) - len,
"aggregation mode: %s\n\n%s\n%s\n",
(arsta->aggr_mode == ATH11K_DBG_AGGR_MODE_AUTO) ?
"auto" : "manual", "auto = 0", "manual = 1");
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t ath11k_dbg_sta_write_aggr_mode(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
u32 aggr_mode;
int ret;
if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode))
return -EINVAL;
if (aggr_mode >= ATH11K_DBG_AGGR_MODE_MAX)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON ||
aggr_mode == arsta->aggr_mode) {
ret = count;
goto out;
}
ret = ath11k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr);
if (ret) {
ath11k_warn(ar->ab, "failed to clear addba session ret: %d\n",
ret);
goto out;
}
arsta->aggr_mode = aggr_mode;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_aggr_mode = {
.read = ath11k_dbg_sta_read_aggr_mode,
.write = ath11k_dbg_sta_write_aggr_mode,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t
ath11k_write_htt_peer_stats_reset(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
struct htt_ext_stats_cfg_params cfg_params = { 0 };
int ret;
u8 type;
ret = kstrtou8_from_user(user_buf, count, 0, &type);
if (ret)
return ret;
if (!type)
return ret;
mutex_lock(&ar->conf_mutex);
cfg_params.cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
cfg_params.cfg0 |= FIELD_PREP(GENMASK(15, 1),
HTT_PEER_STATS_REQ_MODE_FLUSH_TQM);
cfg_params.cfg1 = HTT_STAT_DEFAULT_PEER_REQ_TYPE;
cfg_params.cfg2 |= FIELD_PREP(GENMASK(7, 0), sta->addr[0]);
cfg_params.cfg2 |= FIELD_PREP(GENMASK(15, 8), sta->addr[1]);
cfg_params.cfg2 |= FIELD_PREP(GENMASK(23, 16), sta->addr[2]);
cfg_params.cfg2 |= FIELD_PREP(GENMASK(31, 24), sta->addr[3]);
cfg_params.cfg3 |= FIELD_PREP(GENMASK(7, 0), sta->addr[4]);
cfg_params.cfg3 |= FIELD_PREP(GENMASK(15, 8), sta->addr[5]);
cfg_params.cfg3 |= ATH11K_HTT_PEER_STATS_RESET;
ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar,
ATH11K_DBG_HTT_EXT_STATS_PEER_INFO,
&cfg_params,
0ULL);
if (ret) {
ath11k_warn(ar->ab, "failed to send htt peer stats request: %d\n", ret);
mutex_unlock(&ar->conf_mutex);
return ret;
}
mutex_unlock(&ar->conf_mutex);
ret = count;
return ret;
}
static const struct file_operations fops_htt_peer_stats_reset = {
.write = ath11k_write_htt_peer_stats_reset,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath11k_dbg_sta_read_peer_ps_state(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
char buf[20];
int len;
spin_lock_bh(&ar->data_lock);
len = scnprintf(buf, sizeof(buf), "%d\n", arsta->peer_ps_state);
spin_unlock_bh(&ar->data_lock);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_peer_ps_state = {
.open = simple_open,
.read = ath11k_dbg_sta_read_peer_ps_state,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath11k_dbg_sta_read_current_ps_duration(struct file *file,
char __user *user_buf,
size_t count,
loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
u64 time_since_station_in_power_save;
char buf[20];
int len;
spin_lock_bh(&ar->data_lock);
if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON &&
arsta->peer_current_ps_valid)
time_since_station_in_power_save = jiffies_to_msecs(jiffies
- arsta->ps_start_jiffies);
else
time_since_station_in_power_save = 0;
len = scnprintf(buf, sizeof(buf), "%llu\n",
time_since_station_in_power_save);
spin_unlock_bh(&ar->data_lock);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_current_ps_duration = {
.open = simple_open,
.read = ath11k_dbg_sta_read_current_ps_duration,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath11k_dbg_sta_read_total_ps_duration(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
char buf[20];
u64 power_save_duration;
int len;
spin_lock_bh(&ar->data_lock);
if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON &&
arsta->peer_current_ps_valid)
power_save_duration = jiffies_to_msecs(jiffies
- arsta->ps_start_jiffies)
+ arsta->ps_total_duration;
else
power_save_duration = arsta->ps_total_duration;
len = scnprintf(buf, sizeof(buf), "%llu\n", power_save_duration);
spin_unlock_bh(&ar->data_lock);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_total_ps_duration = {
.open = simple_open,
.read = ath11k_dbg_sta_read_total_ps_duration,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
void ath11k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
struct ath11k *ar = hw->priv;
if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
debugfs_create_file("tx_stats", 0400, dir, sta,
&fops_tx_stats);
if (ath11k_debugfs_is_extd_rx_stats_enabled(ar))
debugfs_create_file("rx_stats", 0400, dir, sta,
&fops_rx_stats);
debugfs_create_file("htt_peer_stats", 0400, dir, sta,
&fops_htt_peer_stats);
debugfs_create_file("peer_pktlog", 0644, dir, sta,
&fops_peer_pktlog);
debugfs_create_file("aggr_mode", 0644, dir, sta, &fops_aggr_mode);
debugfs_create_file("addba", 0200, dir, sta, &fops_addba);
debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp);
debugfs_create_file("delba", 0200, dir, sta, &fops_delba);
if (test_bit(WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET,
ar->ab->wmi_ab.svc_map))
debugfs_create_file("htt_peer_stats_reset", 0600, dir, sta,
&fops_htt_peer_stats_reset);
debugfs_create_file("peer_ps_state", 0400, dir, sta,
&fops_peer_ps_state);
if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT,
ar->ab->wmi_ab.svc_map)) {
debugfs_create_file("current_ps_duration", 0440, dir, sta,
&fops_current_ps_duration);
debugfs_create_file("total_ps_duration", 0440, dir, sta,
&fops_total_ps_duration);
}
}
|
linux-master
|
drivers/net/wireless/ath/ath11k/debugfs_sta.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hal_desc.h"
#include "hal.h"
#include "hal_tx.h"
#include "hif.h"
#define DSCP_TID_MAP_TBL_ENTRY_SIZE 64
/* dscp_tid_map - Default DSCP-TID mapping
*
* DSCP TID
* 000000 0
* 001000 1
* 010000 2
* 011000 3
* 100000 4
* 101000 5
* 110000 6
* 111000 7
*/
static const u8 dscp_tid_map[DSCP_TID_MAP_TBL_ENTRY_SIZE] = {
0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4,
5, 5, 5, 5, 5, 5, 5, 5,
6, 6, 6, 6, 6, 6, 6, 6,
7, 7, 7, 7, 7, 7, 7, 7,
};
void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
struct hal_tx_info *ti)
{
struct hal_tcl_data_cmd *tcl_cmd = (struct hal_tcl_data_cmd *)cmd;
tcl_cmd->buf_addr_info.info0 =
FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, ti->paddr);
tcl_cmd->buf_addr_info.info1 =
FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
((uint64_t)ti->paddr >> HAL_ADDR_MSB_REG_SHIFT));
tcl_cmd->buf_addr_info.info1 |=
FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, ti->rbm_id) |
FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, ti->desc_id);
tcl_cmd->info0 =
FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_DESC_TYPE, ti->type) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE, ti->encap_type) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCRYPT_TYPE,
ti->encrypt_type) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_SEARCH_TYPE,
ti->search_type) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ADDR_EN,
ti->addr_search_flags) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_CMD_NUM,
ti->meta_data_flags);
tcl_cmd->info1 = ti->flags0 |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_DATA_LEN, ti->data_len) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_PKT_OFFSET, ti->pkt_offset);
tcl_cmd->info2 = ti->flags1 |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID, ti->tid) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_LMAC_ID, ti->lmac_id);
tcl_cmd->info3 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX,
ti->dscp_tid_tbl_idx) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX,
ti->bss_ast_idx) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM,
ti->bss_ast_hash);
tcl_cmd->info4 = 0;
if (ti->enable_mesh)
ab->hw_params.hw_ops->tx_mesh_enable(ab, tcl_cmd);
}
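/* Sketch of the GENMASK/FIELD_PREP idiom the descriptor setup above relies
 * on (the mask below is made up for illustration): FIELD_PREP() shifts a
 * value into the bit range named by the mask, so multi-field descriptor
 * words can be built by OR-ing the prepared fields together.
 */
#define EXAMPLE_FIELD	GENMASK(7, 4)

static inline u32 example_set_field(u32 word, u8 val)
{
	word &= ~EXAMPLE_FIELD;			/* clear the old field */
	return word | FIELD_PREP(EXAMPLE_FIELD, val);
}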
void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id)
{
u32 ctrl_reg_val;
u32 addr;
u8 hw_map_val[HAL_DSCP_TID_TBL_SIZE];
int i;
u32 value;
int cnt = 0;
ctrl_reg_val = ath11k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
HAL_TCL1_RING_CMN_CTRL_REG);
/* Enable read/write access */
ctrl_reg_val |= HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);
addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_DSCP_TID_MAP +
(4 * id * (HAL_DSCP_TID_TBL_SIZE / 4));
	/* Each DSCP-TID mapping occupies three bits, so each loop iteration
	 * packs eight mappings into three bytes.
	 */
for (i = 0; i < DSCP_TID_MAP_TBL_ENTRY_SIZE; i += 8) {
value = FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP0,
dscp_tid_map[i]) |
FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP1,
dscp_tid_map[i + 1]) |
FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP2,
dscp_tid_map[i + 2]) |
FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP3,
dscp_tid_map[i + 3]) |
FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP4,
dscp_tid_map[i + 4]) |
FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP5,
dscp_tid_map[i + 5]) |
FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP6,
dscp_tid_map[i + 6]) |
FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP7,
dscp_tid_map[i + 7]);
memcpy(&hw_map_val[cnt], (u8 *)&value, 3);
cnt += 3;
}
for (i = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 4) {
ath11k_hif_write32(ab, addr, *(u32 *)&hw_map_val[i]);
addr += 4;
}
/* Disable read/write access */
ctrl_reg_val = ath11k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
HAL_TCL1_RING_CMN_CTRL_REG);
ctrl_reg_val &= ~HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
HAL_TCL1_RING_CMN_CTRL_REG,
ctrl_reg_val);
}
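/* Pre-write a TLV header (tag HAL_TCL_DATA_CMD plus the command length)
 * into every entry of a TCL data ring so the transmit path only has to
 * fill in the command body behind the header.
 */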
void ath11k_hal_tx_init_data_ring(struct ath11k_base *ab, struct hal_srng *srng)
{
struct hal_srng_params params;
struct hal_tlv_hdr *tlv;
int i, entry_size;
u8 *desc;
memset(&params, 0, sizeof(params));
entry_size = ath11k_hal_srng_get_entrysize(ab, HAL_TCL_DATA);
ath11k_hal_srng_get_params(ab, srng, &params);
desc = (u8 *)params.ring_base_vaddr;
for (i = 0; i < params.num_entries; i++) {
tlv = (struct hal_tlv_hdr *)desc;
tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_TCL_DATA_CMD) |
FIELD_PREP(HAL_TLV_HDR_LEN,
sizeof(struct hal_tcl_data_cmd));
desc += entry_size;
}
}
|
linux-master
|
drivers/net/wireless/ath/ath11k/hal_tx.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include "hal_tx.h"
#include "debug.h"
#include "hal_desc.h"
#include "hif.h"
static const struct hal_srng_config hw_srng_config_template[] = {
/* TODO: max_rings can be populated by querying HW capabilities */
{ /* REO_DST */
.start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
.max_rings = 4,
.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_DST,
.max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
},
{ /* REO_EXCEPTION */
/* Designating REO2TCL ring as exception ring. This ring is
* similar to other REO2SW rings though it is named as REO2TCL.
* Any of the REO2SW rings can be used as the exception ring.
*/
.start_ring_id = HAL_SRNG_RING_ID_REO2TCL,
.max_rings = 1,
.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_DST,
.max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
},
{ /* REO_REINJECT */
.start_ring_id = HAL_SRNG_RING_ID_SW2REO,
.max_rings = 1,
.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
},
{ /* REO_CMD */
.start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
.max_rings = 1,
.entry_size = (sizeof(struct hal_tlv_hdr) +
sizeof(struct hal_reo_get_queue_stats)) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
},
{ /* REO_STATUS */
.start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
.max_rings = 1,
.entry_size = (sizeof(struct hal_tlv_hdr) +
sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_DST,
.max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
},
{ /* TCL_DATA */
.start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
.max_rings = 3,
.entry_size = (sizeof(struct hal_tlv_hdr) +
sizeof(struct hal_tcl_data_cmd)) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
},
{ /* TCL_CMD */
.start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
.max_rings = 1,
.entry_size = (sizeof(struct hal_tlv_hdr) +
sizeof(struct hal_tcl_gse_cmd)) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
},
{ /* TCL_STATUS */
.start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
.max_rings = 1,
.entry_size = (sizeof(struct hal_tlv_hdr) +
sizeof(struct hal_tcl_status_ring)) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_DST,
.max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
},
{ /* CE_SRC */
.start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
.max_rings = 12,
.entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
},
{ /* CE_DST */
.start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
.max_rings = 12,
.entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
},
{ /* CE_DST_STATUS */
.start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
.max_rings = 12,
.entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_DST,
.max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
},
{ /* WBM_IDLE_LINK */
.start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
.max_rings = 1,
.entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
},
{ /* SW2WBM_RELEASE */
.start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE,
.max_rings = 1,
.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
},
{ /* WBM2SW_RELEASE */
.start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
.max_rings = 5,
.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_DST,
.max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
},
{ /* RXDMA_BUF */
.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF,
.max_rings = 2,
.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
.lmac_ring = true,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_RXDMA_RING_MAX_SIZE,
},
{ /* RXDMA_DST */
.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
.max_rings = 1,
.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
.lmac_ring = true,
.ring_dir = HAL_SRNG_DIR_DST,
.max_size = HAL_RXDMA_RING_MAX_SIZE,
},
{ /* RXDMA_MONITOR_BUF */
.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
.max_rings = 1,
.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
.lmac_ring = true,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_RXDMA_RING_MAX_SIZE,
},
{ /* RXDMA_MONITOR_STATUS */
.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
.max_rings = 1,
.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
.lmac_ring = true,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_RXDMA_RING_MAX_SIZE,
},
{ /* RXDMA_MONITOR_DST */
.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
.max_rings = 1,
.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
.lmac_ring = true,
.ring_dir = HAL_SRNG_DIR_DST,
.max_size = HAL_RXDMA_RING_MAX_SIZE,
},
{ /* RXDMA_MONITOR_DESC */
.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
.max_rings = 1,
.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
.lmac_ring = true,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_RXDMA_RING_MAX_SIZE,
},
{ /* RXDMA DIR BUF */
.start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
.max_rings = 1,
.entry_size = 8 >> 2, /* TODO: Define the struct */
.lmac_ring = true,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_RXDMA_RING_MAX_SIZE,
},
};
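/* The host shares two small DMA-coherent pointer arrays with the target:
 * 'rdp' has one u32 slot per ring id for pointers the remote side updates
 * (destination ring HPs and source ring TPs), while 'wrp' holds the
 * host-updated write pointers of LMAC rings, which firmware reads from
 * shared memory instead of a register. See how hp_addr/tp_addr are wired
 * up in ath11k_hal_srng_setup() below.
 */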
static int ath11k_hal_alloc_cont_rdp(struct ath11k_base *ab)
{
struct ath11k_hal *hal = &ab->hal;
size_t size;
size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr,
GFP_KERNEL);
if (!hal->rdp.vaddr)
return -ENOMEM;
return 0;
}
static void ath11k_hal_free_cont_rdp(struct ath11k_base *ab)
{
struct ath11k_hal *hal = &ab->hal;
size_t size;
if (!hal->rdp.vaddr)
return;
size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
dma_free_coherent(ab->dev, size,
hal->rdp.vaddr, hal->rdp.paddr);
hal->rdp.vaddr = NULL;
}
static int ath11k_hal_alloc_cont_wrp(struct ath11k_base *ab)
{
struct ath11k_hal *hal = &ab->hal;
size_t size;
size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr,
GFP_KERNEL);
if (!hal->wrp.vaddr)
return -ENOMEM;
return 0;
}
static void ath11k_hal_free_cont_wrp(struct ath11k_base *ab)
{
struct ath11k_hal *hal = &ab->hal;
size_t size;
if (!hal->wrp.vaddr)
return;
size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
dma_free_coherent(ab->dev, size,
hal->wrp.vaddr, hal->wrp.paddr);
hal->wrp.vaddr = NULL;
}
static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab,
struct hal_srng *srng, int ring_num)
{
struct hal_srng_config *srng_config = &ab->hal.srng_config[HAL_CE_DST];
u32 addr;
u32 val;
addr = HAL_CE_DST_RING_CTRL +
srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
val = ath11k_hif_read32(ab, addr);
val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
srng->u.dst_ring.max_buffer_length);
ath11k_hif_write32(ab, addr, val);
}
static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
struct hal_srng *srng)
{
struct ath11k_hal *hal = &ab->hal;
u32 val;
u64 hp_addr;
u32 reg_base;
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
ath11k_hif_write32(ab, reg_base +
HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(ab),
srng->msi_addr);
val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
((u64)srng->msi_addr >>
HAL_ADDR_MSB_REG_SHIFT)) |
HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
ath11k_hif_write32(ab, reg_base +
HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(ab), val);
ath11k_hif_write32(ab,
reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET(ab),
srng->msi_data);
}
ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);
val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
((u64)srng->ring_base_paddr >>
HAL_ADDR_MSB_REG_SHIFT)) |
FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
(srng->entry_size * srng->num_entries));
ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET(ab), val);
val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET(ab), val);
/* interrupt setup */
val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
(srng->intr_timer_thres_us >> 3));
val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD,
(srng->intr_batch_cntr_thres_entries *
srng->entry_size));
ath11k_hif_write32(ab,
reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(ab),
val);
hp_addr = hal->rdp.paddr +
((unsigned long)srng->u.dst_ring.hp_addr -
(unsigned long)hal->rdp.vaddr);
ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET(ab),
hp_addr & HAL_ADDR_LSB_REG_MASK);
ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET(ab),
hp_addr >> HAL_ADDR_MSB_REG_SHIFT);
/* Initialize head and tail pointers to indicate ring is empty */
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
ath11k_hif_write32(ab, reg_base, 0);
ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET(ab), 0);
*srng->u.dst_ring.hp_addr = 0;
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
val = 0;
if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
val |= HAL_REO1_RING_MISC_MSI_SWAP;
val |= HAL_REO1_RING_MISC_SRNG_ENABLE;
ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET(ab), val);
}
static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
struct hal_srng *srng)
{
struct ath11k_hal *hal = &ab->hal;
u32 val;
u64 tp_addr;
u32 reg_base;
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
ath11k_hif_write32(ab, reg_base +
HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab),
srng->msi_addr);
val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
((u64)srng->msi_addr >>
HAL_ADDR_MSB_REG_SHIFT)) |
HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
ath11k_hif_write32(ab, reg_base +
HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab),
val);
ath11k_hif_write32(ab, reg_base +
HAL_TCL1_RING_MSI1_DATA_OFFSET(ab),
srng->msi_data);
}
ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);
val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
((u64)srng->ring_base_paddr >>
HAL_ADDR_MSB_REG_SHIFT)) |
FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
(srng->entry_size * srng->num_entries));
ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val);
if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);
val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
((u64)srng->ring_base_paddr >>
HAL_ADDR_MSB_REG_SHIFT)) |
FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
(srng->entry_size * srng->num_entries));
ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
}
/* interrupt setup */
/* NOTE: IPQ8074 v2 requires the interrupt timer threshold in the
* unit of 8 usecs instead of 1 usec (as required by v1).
*/
val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD,
srng->intr_timer_thres_us);
val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD,
(srng->intr_batch_cntr_thres_entries *
srng->entry_size));
ath11k_hif_write32(ab,
reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab),
val);
val = 0;
if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
srng->u.src_ring.low_threshold);
}
ath11k_hif_write32(ab,
reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab),
val);
if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
tp_addr = hal->rdp.paddr +
((unsigned long)srng->u.src_ring.tp_addr -
(unsigned long)hal->rdp.vaddr);
ath11k_hif_write32(ab,
reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab),
tp_addr & HAL_ADDR_LSB_REG_MASK);
ath11k_hif_write32(ab,
reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab),
tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
}
/* Initialize head and tail pointers to indicate ring is empty */
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
ath11k_hif_write32(ab, reg_base, 0);
ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
*srng->u.src_ring.tp_addr = 0;
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
val = 0;
if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
val |= HAL_TCL1_RING_MISC_MSI_SWAP;
/* Loop count is not used for SRC rings */
val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;
val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;
ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(ab), val);
}
static void ath11k_hal_srng_hw_init(struct ath11k_base *ab,
struct hal_srng *srng)
{
if (srng->ring_dir == HAL_SRNG_DIR_SRC)
ath11k_hal_srng_src_hw_init(ab, srng);
else
ath11k_hal_srng_dst_hw_init(ab, srng);
}
static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab,
enum hal_ring_type type,
int ring_num, int mac_id)
{
struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
int ring_id;
if (ring_num >= srng_config->max_rings) {
ath11k_warn(ab, "invalid ring number :%d\n", ring_num);
return -EINVAL;
}
ring_id = srng_config->start_ring_id + ring_num;
if (srng_config->lmac_ring)
ring_id += mac_id * HAL_SRNG_RINGS_PER_LMAC;
if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX))
return -EINVAL;
return ring_id;
}
int ath11k_hal_srng_get_entrysize(struct ath11k_base *ab, u32 ring_type)
{
struct hal_srng_config *srng_config;
if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
return -EINVAL;
srng_config = &ab->hal.srng_config[ring_type];
return (srng_config->entry_size << 2);
}
int ath11k_hal_srng_get_max_entries(struct ath11k_base *ab, u32 ring_type)
{
struct hal_srng_config *srng_config;
if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
return -EINVAL;
srng_config = &ab->hal.srng_config[ring_type];
return (srng_config->max_size / srng_config->entry_size);
}
void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
struct hal_srng_params *params)
{
params->ring_base_paddr = srng->ring_base_paddr;
params->ring_base_vaddr = srng->ring_base_vaddr;
params->num_entries = srng->num_entries;
params->intr_timer_thres_us = srng->intr_timer_thres_us;
params->intr_batch_cntr_thres_entries =
srng->intr_batch_cntr_thres_entries;
params->low_threshold = srng->u.src_ring.low_threshold;
params->msi_addr = srng->msi_addr;
params->msi_data = srng->msi_data;
params->flags = srng->flags;
}
dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
struct hal_srng *srng)
{
if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
return 0;
if (srng->ring_dir == HAL_SRNG_DIR_SRC)
return ab->hal.wrp.paddr +
((unsigned long)srng->u.src_ring.hp_addr -
(unsigned long)ab->hal.wrp.vaddr);
else
return ab->hal.rdp.paddr +
((unsigned long)srng->u.dst_ring.hp_addr -
(unsigned long)ab->hal.rdp.vaddr);
}
dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
struct hal_srng *srng)
{
if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
return 0;
if (srng->ring_dir == HAL_SRNG_DIR_SRC)
return ab->hal.rdp.paddr +
((unsigned long)srng->u.src_ring.tp_addr -
(unsigned long)ab->hal.rdp.vaddr);
else
return ab->hal.wrp.paddr +
((unsigned long)srng->u.dst_ring.tp_addr -
(unsigned long)ab->hal.wrp.vaddr);
}
u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type)
{
switch (type) {
case HAL_CE_DESC_SRC:
return sizeof(struct hal_ce_srng_src_desc);
case HAL_CE_DESC_DST:
return sizeof(struct hal_ce_srng_dest_desc);
case HAL_CE_DESC_DST_STATUS:
return sizeof(struct hal_ce_srng_dst_status_desc);
}
return 0;
}
void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
u8 byte_swap_data)
{
struct hal_ce_srng_src_desc *desc = (struct hal_ce_srng_src_desc *)buf;
desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
desc->buffer_addr_info =
FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP,
byte_swap_data) |
FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) |
FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id);
}
void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr)
{
struct hal_ce_srng_dest_desc *desc =
(struct hal_ce_srng_dest_desc *)buf;
desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
desc->buffer_addr_info =
FIELD_PREP(HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI,
((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT));
}
u32 ath11k_hal_ce_dst_status_get_length(void *buf)
{
struct hal_ce_srng_dst_status_desc *desc =
(struct hal_ce_srng_dst_status_desc *)buf;
u32 len;
len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;
return len;
}
void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
dma_addr_t paddr)
{
desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
(paddr & HAL_ADDR_LSB_REG_MASK));
desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) |
FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
}
u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
{
lockdep_assert_held(&srng->lock);
if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
return (srng->ring_base_vaddr + srng->u.dst_ring.tp);
return NULL;
}
static void ath11k_hal_srng_prefetch_desc(struct ath11k_base *ab,
struct hal_srng *srng)
{
u32 *desc;
/* prefetch only if desc is available */
desc = ath11k_hal_srng_dst_peek(ab, srng);
if (likely(desc)) {
dma_sync_single_for_cpu(ab->dev, virt_to_phys(desc),
(srng->entry_size * sizeof(u32)),
DMA_FROM_DEVICE);
prefetch(desc);
}
}
u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
struct hal_srng *srng)
{
u32 *desc;
lockdep_assert_held(&srng->lock);
if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
return NULL;
desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
srng->u.dst_ring.tp += srng->entry_size;
/* wrap around to start of ring */
if (srng->u.dst_ring.tp == srng->ring_size)
srng->u.dst_ring.tp = 0;
/* Try to prefetch the next descriptor in the ring */
if (srng->flags & HAL_SRNG_FLAGS_CACHED)
ath11k_hal_srng_prefetch_desc(ab, srng);
return desc;
}
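/* For a destination ring the hardware produces at HP and the host
 * consumes at TP, so the number of pending entries is (hp - tp) modulo
 * the ring size, counted in units of entry_size 32-bit words.
 */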
int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
bool sync_hw_ptr)
{
u32 tp, hp;
lockdep_assert_held(&srng->lock);
tp = srng->u.dst_ring.tp;
if (sync_hw_ptr) {
hp = *srng->u.dst_ring.hp_addr;
srng->u.dst_ring.cached_hp = hp;
} else {
hp = srng->u.dst_ring.cached_hp;
}
if (hp >= tp)
return (hp - tp) / srng->entry_size;
else
return (srng->ring_size - tp + hp) / srng->entry_size;
}
/* Returns the number of available entries in a src ring. One slot is
 * always left unused so a full ring can be distinguished from an empty
 * one, hence the "- 1" below.
 */
int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
bool sync_hw_ptr)
{
u32 tp, hp;
lockdep_assert_held(&srng->lock);
hp = srng->u.src_ring.hp;
if (sync_hw_ptr) {
tp = *srng->u.src_ring.tp_addr;
srng->u.src_ring.cached_tp = tp;
} else {
tp = srng->u.src_ring.cached_tp;
}
if (tp > hp)
return ((tp - hp) / srng->entry_size) - 1;
else
return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
}
u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
struct hal_srng *srng)
{
u32 *desc;
u32 next_hp;
lockdep_assert_held(&srng->lock);
/* TODO: Using % is expensive, but we have to do this since the size of
 * some SRNG rings is not a power of 2 (due to descriptor sizes). Consider
 * a separate function for rings whose size is a power of 2 (TCL2SW,
 * REO2SW, SW2RXDMA and CE rings) so the % can be replaced with a mask (&).
 */
next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
if (next_hp == srng->u.src_ring.cached_tp)
return NULL;
desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
srng->u.src_ring.hp = next_hp;
/* TODO: Reap functionality is not used by all rings. If a ring does not
 * use it, updating reap_hp here is unnecessary; make sure a separate
 * function is used for such rings before optimizing this update away.
 */
srng->u.src_ring.reap_hp = next_hp;
return desc;
}
u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
struct hal_srng *srng)
{
u32 *desc;
u32 next_reap_hp;
lockdep_assert_held(&srng->lock);
next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
srng->ring_size;
if (next_reap_hp == srng->u.src_ring.cached_tp)
return NULL;
desc = srng->ring_base_vaddr + next_reap_hp;
srng->u.src_ring.reap_hp = next_reap_hp;
return desc;
}
u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
struct hal_srng *srng)
{
u32 *desc;
lockdep_assert_held(&srng->lock);
if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
return NULL;
desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
srng->ring_size;
return desc;
}
u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
{
lockdep_assert_held(&srng->lock);
if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
srng->u.src_ring.cached_tp)
return NULL;
return srng->ring_base_vaddr + srng->u.src_ring.hp;
}
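/* Begin a ring access session: snapshot the pointer owned by the remote
 * side (TP for source rings, HP for destination rings) once, so that the
 * processing loop works against a cached value instead of re-reading
 * shared memory on every iteration.
 */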
void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
{
lockdep_assert_held(&srng->lock);
if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
srng->u.src_ring.cached_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
} else {
srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
/* Try to prefetch the next descriptor in the ring */
if (srng->flags & HAL_SRNG_FLAGS_CACHED)
ath11k_hal_srng_prefetch_desc(ab, srng);
}
}
/* Update cached ring head/tail pointers to HW. ath11k_hal_srng_access_begin()
* should have been called before this.
*/
void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
{
lockdep_assert_held(&srng->lock);
/* TODO: See if we need a write memory barrier here */
if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
/* For LMAC rings, ring pointer updates are done through FW and
* hence written to a shared memory location that is read by FW
*/
if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
srng->u.src_ring.last_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
*srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
} else {
srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
*srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
}
} else {
if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
srng->u.src_ring.last_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
ath11k_hif_write32(ab,
(unsigned long)srng->u.src_ring.hp_addr -
(unsigned long)ab->mem,
srng->u.src_ring.hp);
} else {
srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
ath11k_hif_write32(ab,
(unsigned long)srng->u.dst_ring.tp_addr -
(unsigned long)ab->mem,
srng->u.dst_ring.tp);
}
}
srng->timestamp = jiffies;
}
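/* Hand the WBM idle-link descriptor list to hardware as a chain of
 * scatter buffers: the tail of each buffer is written with the address of
 * the next one, and the head/tail pointer registers below describe where
 * the pre-filled descriptors begin and end. The ring is enabled last.
 */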
void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
struct hal_wbm_idle_scatter_list *sbuf,
u32 nsbufs, u32 tot_link_desc,
u32 end_offset)
{
struct ath11k_buffer_addr *link_addr;
int i;
u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;
link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;
for (i = 1; i < nsbufs; i++) {
link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK;
link_addr->info1 = FIELD_PREP(
HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
(u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
FIELD_PREP(
HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
BASE_ADDR_MATCH_TAG_VAL);
link_addr = (void *)sbuf[i].vaddr +
HAL_WBM_IDLE_SCATTER_BUF_SIZE;
}
ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR,
FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR,
FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
reg_scatter_buf_sz * nsbufs));
ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_RING_BASE_LSB,
FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_RING_BASE_MSB,
FIELD_PREP(
HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
(u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
FIELD_PREP(
HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
BASE_ADDR_MATCH_TAG_VAL));
/* Setup head and tail pointers for the idle list */
ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
sbuf[nsbufs - 1].paddr));
ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1,
FIELD_PREP(
HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
((u64)sbuf[nsbufs - 1].paddr >>
HAL_ADDR_MSB_REG_SHIFT)) |
FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
(end_offset >> 2)));
ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
sbuf[0].paddr));
ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0,
FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
sbuf[0].paddr));
ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1,
FIELD_PREP(
HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1,
0));
ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR,
2 * tot_link_desc);
/* Enable the SRNG */
ath11k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab), 0x40);
}
int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
int ring_num, int mac_id,
struct hal_srng_params *params)
{
struct ath11k_hal *hal = &ab->hal;
struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
struct hal_srng *srng;
int ring_id;
u32 lmac_idx;
int i;
u32 reg_base;
ring_id = ath11k_hal_srng_get_ring_id(ab, type, ring_num, mac_id);
if (ring_id < 0)
return ring_id;
srng = &hal->srng_list[ring_id];
srng->ring_id = ring_id;
srng->ring_dir = srng_config->ring_dir;
srng->ring_base_paddr = params->ring_base_paddr;
srng->ring_base_vaddr = params->ring_base_vaddr;
srng->entry_size = srng_config->entry_size;
srng->num_entries = params->num_entries;
srng->ring_size = srng->entry_size * srng->num_entries;
srng->intr_batch_cntr_thres_entries =
params->intr_batch_cntr_thres_entries;
srng->intr_timer_thres_us = params->intr_timer_thres_us;
srng->flags = params->flags;
srng->msi_addr = params->msi_addr;
srng->msi_data = params->msi_data;
srng->initialized = 1;
spin_lock_init(&srng->lock);
lockdep_set_class(&srng->lock, hal->srng_key + ring_id);
for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
srng->hwreg_base[i] = srng_config->reg_start[i] +
(ring_num * srng_config->reg_size[i]);
}
memset(srng->ring_base_vaddr, 0,
(srng->entry_size * srng->num_entries) << 2);
/* TODO: Add comments on these swap configurations */
if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP |
HAL_SRNG_FLAGS_RING_PTR_SWAP;
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
srng->u.src_ring.hp = 0;
srng->u.src_ring.cached_tp = 0;
srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
srng->u.src_ring.low_threshold = params->low_threshold *
srng->entry_size;
if (srng_config->lmac_ring) {
lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
lmac_idx);
srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
} else {
if (!ab->hw_params.supports_shadow_regs)
srng->u.src_ring.hp_addr =
(u32 *)((unsigned long)ab->mem + reg_base);
else
ath11k_dbg(ab, ATH11K_DBG_HAL,
"type %d ring_num %d reg_base 0x%x shadow 0x%lx\n",
type, ring_num,
reg_base,
(unsigned long)srng->u.src_ring.hp_addr -
(unsigned long)ab->mem);
}
} else {
/* During initialization loop count in all the descriptors
* will be set to zero, and HW will set it to 1 on completing
* descriptor update in first loop, and increments it by 1 on
* subsequent loops (loop count wraps around after reaching
* 0xffff). The 'loop_cnt' in SW ring state is the expected
* loop count in descriptors updated by HW (to be processed
* by SW).
*/
srng->u.dst_ring.loop_cnt = 1;
srng->u.dst_ring.tp = 0;
srng->u.dst_ring.cached_hp = 0;
srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
if (srng_config->lmac_ring) {
/* For LMAC rings, tail pointer updates will be done
* through FW by writing to a shared memory location
*/
lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
lmac_idx);
srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
} else {
if (!ab->hw_params.supports_shadow_regs)
srng->u.dst_ring.tp_addr =
(u32 *)((unsigned long)ab->mem + reg_base +
(HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab)));
else
ath11k_dbg(ab, ATH11K_DBG_HAL,
"type %d ring_num %d target_reg 0x%x shadow 0x%lx\n",
type, ring_num,
reg_base + (HAL_REO1_RING_TP(ab) -
HAL_REO1_RING_HP(ab)),
(unsigned long)srng->u.dst_ring.tp_addr -
(unsigned long)ab->mem);
}
}
if (srng_config->lmac_ring)
return ring_id;
ath11k_hal_srng_hw_init(ab, srng);
if (type == HAL_CE_DST) {
srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
ath11k_hal_ce_dst_setup(ab, srng, ring_num);
}
return ring_id;
}
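/* Shadow register support: instead of pointing hp_addr/tp_addr at the
 * ring's real HP/TP registers, point them at a shadow register block that
 * presumably stays accessible while the target link is asleep; the target
 * is expected to propagate shadow writes to the real registers. The map
 * built here is also exported via ath11k_hal_srng_get_shadow_config().
 */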
static void ath11k_hal_srng_update_hp_tp_addr(struct ath11k_base *ab,
int shadow_cfg_idx,
enum hal_ring_type ring_type,
int ring_num)
{
struct hal_srng *srng;
struct ath11k_hal *hal = &ab->hal;
int ring_id;
struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
ring_id = ath11k_hal_srng_get_ring_id(ab, ring_type, ring_num, 0);
if (ring_id < 0)
return;
srng = &hal->srng_list[ring_id];
if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) +
(unsigned long)ab->mem);
else
srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) +
(unsigned long)ab->mem);
}
int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab,
enum hal_ring_type ring_type,
int ring_num)
{
struct ath11k_hal *hal = &ab->hal;
struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
int shadow_cfg_idx = hal->num_shadow_reg_configured;
u32 target_reg;
if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
return -EINVAL;
hal->num_shadow_reg_configured++;
target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
ring_num;
/* For destination ring, shadow the TP */
if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
target_reg += HAL_OFFSET_FROM_HP_TO_TP;
hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;
/* update hp/tp addr in the hal structure */
ath11k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type,
ring_num);
ath11k_dbg(ab, ATH11K_DBG_HAL,
"update shadow config target_reg %x shadow reg 0x%x shadow_idx 0x%x ring_type %d ring num %d",
target_reg,
HAL_SHADOW_REG(ab, shadow_cfg_idx),
shadow_cfg_idx,
ring_type, ring_num);
return 0;
}
void ath11k_hal_srng_shadow_config(struct ath11k_base *ab)
{
struct ath11k_hal *hal = &ab->hal;
int ring_type, ring_num;
/* update all the non-CE srngs. */
for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
if (ring_type == HAL_CE_SRC ||
ring_type == HAL_CE_DST ||
ring_type == HAL_CE_DST_STATUS)
continue;
if (srng_config->lmac_ring)
continue;
for (ring_num = 0; ring_num < srng_config->max_rings; ring_num++)
ath11k_hal_srng_update_shadow_config(ab, ring_type, ring_num);
}
}
void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
u32 **cfg, u32 *len)
{
struct ath11k_hal *hal = &ab->hal;
*len = hal->num_shadow_reg_configured;
*cfg = hal->shadow_reg_addr;
}
void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
struct hal_srng *srng)
{
lockdep_assert_held(&srng->lock);
/* Check whether the ring is empty: update the shadow
 * HP only when the ring isn't empty.
 */
if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
*srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
ath11k_hal_srng_access_end(ab, srng);
}
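/* Build the per-device SRNG configuration: duplicate the read-only
 * template and patch in the register block start addresses and per-ring
 * strides, which vary with the hardware revision (hence the (ab)
 * accessors).
 */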
static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
{
struct ath11k_hal *hal = &ab->hal;
struct hal_srng_config *s;
hal->srng_config = kmemdup(hw_srng_config_template,
sizeof(hw_srng_config_template),
GFP_KERNEL);
if (!hal->srng_config)
return -ENOMEM;
s = &hal->srng_config[HAL_REO_DST];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP(ab);
s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
s->reg_size[1] = HAL_REO2_RING_HP(ab) - HAL_REO1_RING_HP(ab);
s = &hal->srng_config[HAL_REO_EXCEPTION];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(ab);
s = &hal->srng_config[HAL_REO_REINJECT];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP(ab);
s = &hal->srng_config[HAL_REO_CMD];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP(ab);
s = &hal->srng_config[HAL_REO_STATUS];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP(ab);
s = &hal->srng_config[HAL_TCL_DATA];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
s = &hal->srng_config[HAL_TCL_CMD];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;
s = &hal->srng_config[HAL_TCL_STATUS];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
s = &hal->srng_config[HAL_CE_SRC];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB +
ATH11K_CE_OFFSET(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP +
ATH11K_CE_OFFSET(ab);
s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
s = &hal->srng_config[HAL_CE_DST];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB +
ATH11K_CE_OFFSET(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP +
ATH11K_CE_OFFSET(ab);
s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
s = &hal->srng_config[HAL_CE_DST_STATUS];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
HAL_CE_DST_STATUS_RING_BASE_LSB + ATH11K_CE_OFFSET(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP +
ATH11K_CE_OFFSET(ab);
s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
s = &hal->srng_config[HAL_WBM_IDLE_LINK];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;
s = &hal->srng_config[HAL_SW2WBM_RELEASE];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP;
s = &hal->srng_config[HAL_WBM2SW_RELEASE];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) -
HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;
return 0;
}
static void ath11k_hal_register_srng_key(struct ath11k_base *ab)
{
struct ath11k_hal *hal = &ab->hal;
u32 ring_id;
for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
lockdep_register_key(hal->srng_key + ring_id);
}
static void ath11k_hal_unregister_srng_key(struct ath11k_base *ab)
{
struct ath11k_hal *hal = &ab->hal;
u32 ring_id;
for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
lockdep_unregister_key(hal->srng_key + ring_id);
}
int ath11k_hal_srng_init(struct ath11k_base *ab)
{
struct ath11k_hal *hal = &ab->hal;
int ret;
memset(hal, 0, sizeof(*hal));
ret = ath11k_hal_srng_create_config(ab);
if (ret)
goto err_hal;
ret = ath11k_hal_alloc_cont_rdp(ab);
if (ret)
goto err_hal;
ret = ath11k_hal_alloc_cont_wrp(ab);
if (ret)
goto err_free_cont_rdp;
ath11k_hal_register_srng_key(ab);
return 0;
err_free_cont_rdp:
ath11k_hal_free_cont_rdp(ab);
err_hal:
return ret;
}
EXPORT_SYMBOL(ath11k_hal_srng_init);
void ath11k_hal_srng_deinit(struct ath11k_base *ab)
{
struct ath11k_hal *hal = &ab->hal;
ath11k_hal_unregister_srng_key(ab);
ath11k_hal_free_cont_rdp(ab);
ath11k_hal_free_cont_wrp(ab);
kfree(hal->srng_config);
}
EXPORT_SYMBOL(ath11k_hal_srng_deinit);
void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
{
struct hal_srng *srng;
struct ath11k_ext_irq_grp *irq_grp;
struct ath11k_ce_pipe *ce_pipe;
int i;
ath11k_err(ab, "Last interrupt received for each CE:\n");
for (i = 0; i < ab->hw_params.ce_count; i++) {
ce_pipe = &ab->ce.ce_pipe[i];
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
ath11k_err(ab, "CE_id %d pipe_num %d %ums before\n",
i, ce_pipe->pipe_num,
jiffies_to_msecs(jiffies - ce_pipe->timestamp));
}
ath11k_err(ab, "\nLast interrupt received for each group:\n");
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
irq_grp = &ab->ext_irq_grp[i];
ath11k_err(ab, "group_id %d %ums before\n",
irq_grp->grp_id,
jiffies_to_msecs(jiffies - irq_grp->timestamp));
}
for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) {
srng = &ab->hal.srng_list[i];
if (!srng->initialized)
continue;
if (srng->ring_dir == HAL_SRNG_DIR_SRC)
ath11k_err(ab,
"src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n",
srng->ring_id, srng->u.src_ring.hp,
srng->u.src_ring.reap_hp,
*srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp,
srng->u.src_ring.last_tp,
jiffies_to_msecs(jiffies - srng->timestamp));
else if (srng->ring_dir == HAL_SRNG_DIR_DST)
ath11k_err(ab,
"dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n",
srng->ring_id, srng->u.dst_ring.tp,
*srng->u.dst_ring.hp_addr,
srng->u.dst_ring.cached_hp,
srng->u.dst_ring.last_hp,
jiffies_to_msecs(jiffies - srng->timestamp));
}
}
|
linux-master
|
drivers/net/wireless/ath/ath11k/hal.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include "debug.h"
#include "hif.h"
struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ab, int size)
{
struct sk_buff *skb;
skb = dev_alloc_skb(size + sizeof(struct ath11k_htc_hdr));
if (!skb)
return NULL;
skb_reserve(skb, sizeof(struct ath11k_htc_hdr));
/* FW/HTC requires 4-byte aligned streams */
if (!IS_ALIGNED((unsigned long)skb->data, 4))
ath11k_warn(ab, "Unaligned HTC tx skb\n");
return skb;
}
static void ath11k_htc_control_tx_complete(struct ath11k_base *ab,
struct sk_buff *skb)
{
kfree_skb(skb);
}
static struct sk_buff *ath11k_htc_build_tx_ctrl_skb(void *ab)
{
struct sk_buff *skb;
struct ath11k_skb_cb *skb_cb;
skb = dev_alloc_skb(ATH11K_HTC_CONTROL_BUFFER_SIZE);
if (!skb)
return NULL;
skb_reserve(skb, sizeof(struct ath11k_htc_hdr));
WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
skb_cb = ATH11K_SKB_CB(skb);
memset(skb_cb, 0, sizeof(*skb_cb));
return skb;
}
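/* Write the HTC header in front of the payload: endpoint id and payload
 * length in the first word, a credit-update request flag when credit flow
 * is enabled for the endpoint, and a per-endpoint sequence number in the
 * control word.
 */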
static void ath11k_htc_prepare_tx_skb(struct ath11k_htc_ep *ep,
struct sk_buff *skb)
{
struct ath11k_htc_hdr *hdr;
hdr = (struct ath11k_htc_hdr *)skb->data;
memset(hdr, 0, sizeof(*hdr));
hdr->htc_info = FIELD_PREP(HTC_HDR_ENDPOINTID, ep->eid) |
FIELD_PREP(HTC_HDR_PAYLOADLEN,
(skb->len - sizeof(*hdr)));
if (ep->tx_credit_flow_enabled)
hdr->htc_info |= FIELD_PREP(HTC_HDR_FLAGS,
ATH11K_HTC_FLAG_NEED_CREDIT_UPDATE);
spin_lock_bh(&ep->htc->tx_lock);
hdr->ctrl_info = FIELD_PREP(HTC_HDR_CONTROLBYTES1, ep->seq_no++);
spin_unlock_bh(&ep->htc->tx_lock);
}
int ath11k_htc_send(struct ath11k_htc *htc,
enum ath11k_htc_ep_id eid,
struct sk_buff *skb)
{
struct ath11k_htc_ep *ep = &htc->endpoint[eid];
struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
struct device *dev = htc->ab->dev;
struct ath11k_base *ab = htc->ab;
int credits = 0;
int ret;
bool credit_flow_enabled = (ab->hw_params.credit_flow &&
ep->tx_credit_flow_enabled);
if (eid >= ATH11K_HTC_EP_COUNT) {
ath11k_warn(ab, "Invalid endpoint id: %d\n", eid);
return -ENOENT;
}
skb_push(skb, sizeof(struct ath11k_htc_hdr));
if (credit_flow_enabled) {
credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
spin_lock_bh(&htc->tx_lock);
if (ep->tx_credits < credits) {
ath11k_dbg(ab, ATH11K_DBG_HTC,
"ep %d insufficient credits required %d total %d\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
ret = -EAGAIN;
goto err_pull;
}
ep->tx_credits -= credits;
ath11k_dbg(ab, ATH11K_DBG_HTC,
"ep %d credits consumed %d total %d\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
}
ath11k_htc_prepare_tx_skb(ep, skb);
skb_cb->eid = eid;
skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
ret = dma_mapping_error(dev, skb_cb->paddr);
if (ret) {
ret = -EIO;
goto err_credits;
}
ath11k_dbg(ab, ATH11K_DBG_HTC, "tx skb %p eid %d paddr %pad\n",
skb, skb_cb->eid, &skb_cb->paddr);
ret = ath11k_ce_send(htc->ab, skb, ep->ul_pipe_id, ep->eid);
if (ret)
goto err_unmap;
return 0;
err_unmap:
dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
if (credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
ath11k_dbg(ab, ATH11K_DBG_HTC,
"ep %d credits reverted %d total %d\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
if (ep->ep_ops.ep_tx_credits)
ep->ep_ops.ep_tx_credits(htc->ab);
}
err_pull:
skb_pull(skb, sizeof(struct ath11k_htc_hdr));
return ret;
}
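/* Credit accounting: ath11k_htc_send() charges
 * DIV_ROUND_UP(skb->len, target_credit_size) credits per message; the
 * target hands credits back in report records carried in rx trailers,
 * which are re-added to the endpoint here.
 */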
static void
ath11k_htc_process_credit_report(struct ath11k_htc *htc,
const struct ath11k_htc_credit_report *report,
int len,
enum ath11k_htc_ep_id eid)
{
struct ath11k_base *ab = htc->ab;
struct ath11k_htc_ep *ep;
int i, n_reports;
if (len % sizeof(*report))
ath11k_warn(ab, "Uneven credit report len %d", len);
n_reports = len / sizeof(*report);
spin_lock_bh(&htc->tx_lock);
for (i = 0; i < n_reports; i++, report++) {
if (report->eid >= ATH11K_HTC_EP_COUNT)
break;
ep = &htc->endpoint[report->eid];
ep->tx_credits += report->credits;
ath11k_dbg(ab, ATH11K_DBG_HTC, "ep %d credits got %d total %d\n",
report->eid, report->credits, ep->tx_credits);
if (ep->ep_ops.ep_tx_credits) {
spin_unlock_bh(&htc->tx_lock);
ep->ep_ops.ep_tx_credits(htc->ab);
spin_lock_bh(&htc->tx_lock);
}
}
spin_unlock_bh(&htc->tx_lock);
}
static int ath11k_htc_process_trailer(struct ath11k_htc *htc,
u8 *buffer,
int length,
enum ath11k_htc_ep_id src_eid)
{
struct ath11k_base *ab = htc->ab;
int status = 0;
struct ath11k_htc_record *record;
size_t len;
while (length > 0) {
record = (struct ath11k_htc_record *)buffer;
if (length < sizeof(record->hdr)) {
status = -EINVAL;
break;
}
if (record->hdr.len > length) {
/* no room left in buffer for record */
ath11k_warn(ab, "Invalid record length: %d\n",
record->hdr.len);
status = -EINVAL;
break;
}
if (ab->hw_params.credit_flow) {
switch (record->hdr.id) {
case ATH11K_HTC_RECORD_CREDITS:
len = sizeof(struct ath11k_htc_credit_report);
if (record->hdr.len < len) {
ath11k_warn(ab, "Credit report too long\n");
status = -EINVAL;
break;
}
ath11k_htc_process_credit_report(htc,
record->credit_report,
record->hdr.len,
src_eid);
break;
default:
ath11k_warn(ab, "Unhandled record: id:%d length:%d\n",
record->hdr.id, record->hdr.len);
break;
}
}
if (status)
break;
/* multiple records may be present in a trailer */
buffer += sizeof(record->hdr) + record->hdr.len;
length -= sizeof(record->hdr) + record->hdr.len;
}
return status;
}
static void ath11k_htc_suspend_complete(struct ath11k_base *ab, bool ack)
{
ath11k_dbg(ab, ATH11K_DBG_BOOT, "suspend complete %d\n", ack);
if (ack)
set_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
else
clear_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
complete(&ab->htc_suspend);
}
void ath11k_htc_tx_completion_handler(struct ath11k_base *ab,
struct sk_buff *skb)
{
struct ath11k_htc *htc = &ab->htc;
struct ath11k_htc_ep *ep;
void (*ep_tx_complete)(struct ath11k_base *, struct sk_buff *);
u8 eid;
eid = ATH11K_SKB_CB(skb)->eid;
if (eid >= ATH11K_HTC_EP_COUNT) {
dev_kfree_skb_any(skb);
return;
}
ep = &htc->endpoint[eid];
spin_lock_bh(&htc->tx_lock);
ep_tx_complete = ep->ep_ops.ep_tx_complete;
spin_unlock_bh(&htc->tx_lock);
if (!ep_tx_complete) {
dev_kfree_skb_any(skb);
return;
}
ep_tx_complete(htc->ab, skb);
}
static void ath11k_htc_wakeup_from_suspend(struct ath11k_base *ab)
{
ath11k_dbg(ab, ATH11K_DBG_BOOT, "wakeup from suspend is received\n");
}
void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
struct sk_buff *skb)
{
int status = 0;
struct ath11k_htc *htc = &ab->htc;
struct ath11k_htc_hdr *hdr;
struct ath11k_htc_ep *ep;
u16 payload_len;
u32 message_id, trailer_len = 0;
size_t min_len;
u8 eid;
bool trailer_present;
hdr = (struct ath11k_htc_hdr *)skb->data;
skb_pull(skb, sizeof(*hdr));
eid = FIELD_GET(HTC_HDR_ENDPOINTID, hdr->htc_info);
if (eid >= ATH11K_HTC_EP_COUNT) {
ath11k_warn(ab, "HTC Rx: invalid eid %d\n", eid);
goto out;
}
ep = &htc->endpoint[eid];
payload_len = FIELD_GET(HTC_HDR_PAYLOADLEN, hdr->htc_info);
if (payload_len + sizeof(*hdr) > ATH11K_HTC_MAX_LEN) {
ath11k_warn(ab, "HTC rx frame too long, len: %zu\n",
payload_len + sizeof(*hdr));
goto out;
}
if (skb->len < payload_len) {
ath11k_warn(ab, "HTC Rx: insufficient length, got %d, expected %d\n",
skb->len, payload_len);
goto out;
}
/* get flags to check for trailer */
trailer_present = (FIELD_GET(HTC_HDR_FLAGS, hdr->htc_info)) &
ATH11K_HTC_FLAG_TRAILER_PRESENT;
ath11k_dbg(ab, ATH11K_DBG_HTC, "rx ep %d skb %p trailer_present %d\n",
eid, skb, trailer_present);
if (trailer_present) {
u8 *trailer;
trailer_len = FIELD_GET(HTC_HDR_CONTROLBYTES0, hdr->ctrl_info);
min_len = sizeof(struct ath11k_htc_record_hdr);
if ((trailer_len < min_len) ||
(trailer_len > payload_len)) {
ath11k_warn(ab, "Invalid trailer length: %d\n",
trailer_len);
goto out;
}
trailer = (u8 *)hdr;
trailer += sizeof(*hdr);
trailer += payload_len;
trailer -= trailer_len;
status = ath11k_htc_process_trailer(htc, trailer,
trailer_len, eid);
if (status)
goto out;
skb_trim(skb, skb->len - trailer_len);
}
if (trailer_len >= payload_len)
/* zero length packet with trailer data, just drop these */
goto out;
if (eid == ATH11K_HTC_EP_0) {
struct ath11k_htc_msg *msg = (struct ath11k_htc_msg *)skb->data;
message_id = FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id);
ath11k_dbg(ab, ATH11K_DBG_HTC, "rx ep %d skb %p message_id %d\n",
eid, skb, message_id);
switch (message_id) {
case ATH11K_HTC_MSG_READY_ID:
case ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
/* handle HTC control message */
if (completion_done(&htc->ctl_resp)) {
/* this is a fatal error, target should not be
* sending unsolicited messages on the ep 0
*/
ath11k_warn(ab, "HTC rx ctrl still processing\n");
complete(&htc->ctl_resp);
goto out;
}
htc->control_resp_len =
min_t(int, skb->len,
ATH11K_HTC_MAX_CTRL_MSG_LEN);
memcpy(htc->control_resp_buffer, skb->data,
htc->control_resp_len);
complete(&htc->ctl_resp);
break;
case ATH11K_HTC_MSG_SEND_SUSPEND_COMPLETE:
ath11k_htc_suspend_complete(ab, true);
break;
case ATH11K_HTC_MSG_NACK_SUSPEND:
ath11k_htc_suspend_complete(ab, false);
break;
case ATH11K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID:
ath11k_htc_wakeup_from_suspend(ab);
break;
default:
ath11k_warn(ab, "ignoring unsolicited htc ep0 event %ld\n",
FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id));
break;
}
goto out;
}
ep->ep_ops.ep_rx_complete(ab, skb);
/* poll tx completion for interrupt disabled CE's */
ath11k_ce_poll_send_completed(ab, ep->ul_pipe_id);
/* skb is now owned by the rx completion handler */
skb = NULL;
out:
kfree_skb(skb);
}
static void ath11k_htc_control_rx_complete(struct ath11k_base *ab,
struct sk_buff *skb)
{
/* This is unexpected. FW is not supposed to send regular rx on this
* endpoint.
*/
ath11k_warn(ab, "unexpected htc rx\n");
kfree_skb(skb);
}
static const char *htc_service_name(enum ath11k_htc_svc_id id)
{
switch (id) {
case ATH11K_HTC_SVC_ID_RESERVED:
return "Reserved";
case ATH11K_HTC_SVC_ID_RSVD_CTRL:
return "Control";
case ATH11K_HTC_SVC_ID_WMI_CONTROL:
return "WMI";
case ATH11K_HTC_SVC_ID_WMI_DATA_BE:
return "DATA BE";
case ATH11K_HTC_SVC_ID_WMI_DATA_BK:
return "DATA BK";
case ATH11K_HTC_SVC_ID_WMI_DATA_VI:
return "DATA VI";
case ATH11K_HTC_SVC_ID_WMI_DATA_VO:
return "DATA VO";
case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1:
return "WMI MAC1";
case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2:
return "WMI MAC2";
case ATH11K_HTC_SVC_ID_NMI_CONTROL:
return "NMI Control";
case ATH11K_HTC_SVC_ID_NMI_DATA:
return "NMI Data";
case ATH11K_HTC_SVC_ID_HTT_DATA_MSG:
return "HTT Data";
case ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS:
return "RAW";
case ATH11K_HTC_SVC_ID_IPA_TX:
return "IPA TX";
case ATH11K_HTC_SVC_ID_PKT_LOG:
return "PKT LOG";
}
return "Unknown";
}
static void ath11k_htc_reset_endpoint_states(struct ath11k_htc *htc)
{
struct ath11k_htc_ep *ep;
int i;
for (i = ATH11K_HTC_EP_0; i < ATH11K_HTC_EP_COUNT; i++) {
ep = &htc->endpoint[i];
ep->service_id = ATH11K_HTC_SVC_ID_UNUSED;
ep->max_ep_message_len = 0;
ep->max_tx_queue_depth = 0;
ep->eid = i;
ep->htc = htc;
ep->tx_credit_flow_enabled = true;
}
}
static u8 ath11k_htc_get_credit_allocation(struct ath11k_htc *htc,
u16 service_id)
{
u8 i, allocation = 0;
for (i = 0; i < ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) {
if (htc->service_alloc_table[i].service_id == service_id) {
allocation =
htc->service_alloc_table[i].credit_allocation;
}
}
return allocation;
}
static int ath11k_htc_setup_target_buffer_assignments(struct ath11k_htc *htc)
{
struct ath11k_htc_svc_tx_credits *serv_entry;
u32 svc_id[] = {
ATH11K_HTC_SVC_ID_WMI_CONTROL,
ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2,
};
int i, credits;
credits = htc->total_transmit_credits;
serv_entry = htc->service_alloc_table;
if ((htc->wmi_ep_count == 0) ||
(htc->wmi_ep_count > ARRAY_SIZE(svc_id)))
return -EINVAL;
/* Divide credits among number of endpoints for WMI */
credits = credits / htc->wmi_ep_count;
for (i = 0; i < htc->wmi_ep_count; i++) {
serv_entry[i].service_id = svc_id[i];
serv_entry[i].credit_allocation = credits;
}
return 0;
}
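/* Wait for the target's HTC ready message on endpoint 0 and record the
 * advertised credit count and credit size; if the completion does not
 * fire in time, poll the copy engines once and retry before giving up.
 */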
int ath11k_htc_wait_target(struct ath11k_htc *htc)
{
int i, status = 0;
struct ath11k_base *ab = htc->ab;
unsigned long time_left;
struct ath11k_htc_ready *ready;
u16 message_id;
u16 credit_count;
u16 credit_size;
time_left = wait_for_completion_timeout(&htc->ctl_resp,
ATH11K_HTC_WAIT_TIMEOUT_HZ);
if (!time_left) {
ath11k_warn(ab, "failed to receive control response completion, polling..\n");
for (i = 0; i < ab->hw_params.ce_count; i++)
ath11k_ce_per_engine_service(htc->ab, i);
time_left =
wait_for_completion_timeout(&htc->ctl_resp,
ATH11K_HTC_WAIT_TIMEOUT_HZ);
if (!time_left)
status = -ETIMEDOUT;
}
if (status < 0) {
ath11k_warn(ab, "ctl_resp never came in (%d)\n", status);
return status;
}
if (htc->control_resp_len < sizeof(*ready)) {
ath11k_warn(ab, "Invalid HTC ready msg len:%d\n",
htc->control_resp_len);
return -ECOMM;
}
ready = (struct ath11k_htc_ready *)htc->control_resp_buffer;
message_id = FIELD_GET(HTC_MSG_MESSAGEID, ready->id_credit_count);
credit_count = FIELD_GET(HTC_READY_MSG_CREDITCOUNT,
ready->id_credit_count);
credit_size = FIELD_GET(HTC_READY_MSG_CREDITSIZE, ready->size_ep);
if (message_id != ATH11K_HTC_MSG_READY_ID) {
ath11k_warn(ab, "Invalid HTC ready msg: 0x%x\n", message_id);
return -ECOMM;
}
htc->total_transmit_credits = credit_count;
htc->target_credit_size = credit_size;
ath11k_dbg(ab, ATH11K_DBG_HTC,
"target ready total_transmit_credits %d target_credit_size %d\n",
htc->total_transmit_credits, htc->target_credit_size);
if ((htc->total_transmit_credits == 0) ||
(htc->target_credit_size == 0)) {
ath11k_warn(ab, "Invalid credit size received\n");
return -ECOMM;
}
/* For QCA6390, wmi endpoint uses 1 credit to avoid
* back-to-back write.
*/
if (ab->hw_params.supports_shadow_regs)
htc->total_transmit_credits = 1;
ath11k_htc_setup_target_buffer_assignments(htc);
return 0;
}
int ath11k_htc_connect_service(struct ath11k_htc *htc,
struct ath11k_htc_svc_conn_req *conn_req,
struct ath11k_htc_svc_conn_resp *conn_resp)
{
struct ath11k_base *ab = htc->ab;
struct ath11k_htc_conn_svc *req_msg;
struct ath11k_htc_conn_svc_resp resp_msg_dummy;
struct ath11k_htc_conn_svc_resp *resp_msg = &resp_msg_dummy;
enum ath11k_htc_ep_id assigned_eid = ATH11K_HTC_EP_COUNT;
struct ath11k_htc_ep *ep;
struct sk_buff *skb;
unsigned int max_msg_size = 0;
int length, status;
unsigned long time_left;
bool disable_credit_flow_ctrl = false;
u16 message_id, service_id, flags = 0;
u8 tx_alloc = 0;
/* special case for HTC pseudo control service */
if (conn_req->service_id == ATH11K_HTC_SVC_ID_RSVD_CTRL) {
disable_credit_flow_ctrl = true;
assigned_eid = ATH11K_HTC_EP_0;
max_msg_size = ATH11K_HTC_MAX_CTRL_MSG_LEN;
memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
goto setup;
}
tx_alloc = ath11k_htc_get_credit_allocation(htc,
conn_req->service_id);
if (!tx_alloc)
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"htc service %s does not allocate target credits\n",
htc_service_name(conn_req->service_id));
skb = ath11k_htc_build_tx_ctrl_skb(htc->ab);
if (!skb) {
ath11k_warn(ab, "Failed to allocate HTC packet\n");
return -ENOMEM;
}
length = sizeof(*req_msg);
skb_put(skb, length);
memset(skb->data, 0, length);
req_msg = (struct ath11k_htc_conn_svc *)skb->data;
req_msg->msg_svc_id = FIELD_PREP(HTC_MSG_MESSAGEID,
ATH11K_HTC_MSG_CONNECT_SERVICE_ID);
flags |= FIELD_PREP(ATH11K_HTC_CONN_FLAGS_RECV_ALLOC, tx_alloc);
/* Only enable credit flow control for WMI ctrl service */
if (!(conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL ||
conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1 ||
conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2)) {
flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
disable_credit_flow_ctrl = true;
}
if (!ab->hw_params.credit_flow) {
flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
disable_credit_flow_ctrl = true;
}
req_msg->flags_len = FIELD_PREP(HTC_SVC_MSG_CONNECTIONFLAGS, flags);
req_msg->msg_svc_id |= FIELD_PREP(HTC_SVC_MSG_SERVICE_ID,
conn_req->service_id);
reinit_completion(&htc->ctl_resp);
status = ath11k_htc_send(htc, ATH11K_HTC_EP_0, skb);
if (status) {
kfree_skb(skb);
return status;
}
/* wait for response */
time_left = wait_for_completion_timeout(&htc->ctl_resp,
ATH11K_HTC_CONN_SVC_TIMEOUT_HZ);
if (!time_left) {
ath11k_err(ab, "Service connect timeout\n");
return -ETIMEDOUT;
}
/* we controlled the buffer creation, it's aligned */
resp_msg = (struct ath11k_htc_conn_svc_resp *)htc->control_resp_buffer;
message_id = FIELD_GET(HTC_MSG_MESSAGEID, resp_msg->msg_svc_id);
service_id = FIELD_GET(HTC_SVC_RESP_MSG_SERVICEID,
resp_msg->msg_svc_id);
if ((message_id != ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
(htc->control_resp_len < sizeof(*resp_msg))) {
ath11k_err(ab, "Invalid resp message ID 0x%x", message_id);
return -EPROTO;
}
ath11k_dbg(ab, ATH11K_DBG_HTC,
"service %s connect response status 0x%lx assigned ep 0x%lx\n",
htc_service_name(service_id),
FIELD_GET(HTC_SVC_RESP_MSG_STATUS, resp_msg->flags_len),
FIELD_GET(HTC_SVC_RESP_MSG_ENDPOINTID, resp_msg->flags_len));
conn_resp->connect_resp_code = FIELD_GET(HTC_SVC_RESP_MSG_STATUS,
resp_msg->flags_len);
/* check response status */
if (conn_resp->connect_resp_code != ATH11K_HTC_CONN_SVC_STATUS_SUCCESS) {
ath11k_err(ab, "HTC Service %s connect request failed: 0x%x)\n",
htc_service_name(service_id),
conn_resp->connect_resp_code);
return -EPROTO;
}
assigned_eid = (enum ath11k_htc_ep_id)FIELD_GET(
HTC_SVC_RESP_MSG_ENDPOINTID,
resp_msg->flags_len);
max_msg_size = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
resp_msg->flags_len);
setup:
if (assigned_eid >= ATH11K_HTC_EP_COUNT)
return -EPROTO;
if (max_msg_size == 0)
return -EPROTO;
ep = &htc->endpoint[assigned_eid];
ep->eid = assigned_eid;
if (ep->service_id != ATH11K_HTC_SVC_ID_UNUSED)
return -EPROTO;
/* return assigned endpoint to caller */
conn_resp->eid = assigned_eid;
conn_resp->max_msg_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
resp_msg->flags_len);
/* setup the endpoint */
ep->service_id = conn_req->service_id;
ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
ep->max_ep_message_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
resp_msg->flags_len);
ep->tx_credits = tx_alloc;
/* copy all the callbacks */
ep->ep_ops = conn_req->ep_ops;
status = ath11k_hif_map_service_to_pipe(htc->ab,
ep->service_id,
&ep->ul_pipe_id,
&ep->dl_pipe_id);
if (status)
return status;
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
htc_service_name(ep->service_id), ep->ul_pipe_id,
ep->dl_pipe_id, ep->eid);
if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
ep->tx_credit_flow_enabled = false;
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"htc service '%s' eid %d tx flow control disabled\n",
htc_service_name(ep->service_id), assigned_eid);
}
return status;
}
int ath11k_htc_start(struct ath11k_htc *htc)
{
struct sk_buff *skb;
int status = 0;
struct ath11k_base *ab = htc->ab;
struct ath11k_htc_setup_complete_extended *msg;
skb = ath11k_htc_build_tx_ctrl_skb(htc->ab);
if (!skb)
return -ENOMEM;
skb_put(skb, sizeof(*msg));
memset(skb->data, 0, skb->len);
msg = (struct ath11k_htc_setup_complete_extended *)skb->data;
msg->msg_id = FIELD_PREP(HTC_MSG_MESSAGEID,
ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID);
if (ab->hw_params.credit_flow)
ath11k_dbg(ab, ATH11K_DBG_HTC, "using tx credit flow control\n");
else
msg->flags |= ATH11K_GLOBAL_DISABLE_CREDIT_FLOW;
status = ath11k_htc_send(htc, ATH11K_HTC_EP_0, skb);
if (status) {
kfree_skb(skb);
return status;
}
return 0;
}
int ath11k_htc_init(struct ath11k_base *ab)
{
struct ath11k_htc *htc = &ab->htc;
struct ath11k_htc_svc_conn_req conn_req;
struct ath11k_htc_svc_conn_resp conn_resp;
int ret;
spin_lock_init(&htc->tx_lock);
ath11k_htc_reset_endpoint_states(htc);
htc->ab = ab;
switch (ab->wmi_ab.preferred_hw_mode) {
case WMI_HOST_HW_MODE_SINGLE:
htc->wmi_ep_count = 1;
break;
case WMI_HOST_HW_MODE_DBS:
case WMI_HOST_HW_MODE_DBS_OR_SBS:
htc->wmi_ep_count = 2;
break;
case WMI_HOST_HW_MODE_DBS_SBS:
htc->wmi_ep_count = 3;
break;
default:
htc->wmi_ep_count = ab->hw_params.max_radios;
break;
}
/* setup our pseudo HTC control endpoint connection */
memset(&conn_req, 0, sizeof(conn_req));
memset(&conn_resp, 0, sizeof(conn_resp));
conn_req.ep_ops.ep_tx_complete = ath11k_htc_control_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath11k_htc_control_rx_complete;
conn_req.max_send_queue_depth = ATH11K_NUM_CONTROL_TX_BUFFERS;
conn_req.service_id = ATH11K_HTC_SVC_ID_RSVD_CTRL;
/* connect fake service */
ret = ath11k_htc_connect_service(htc, &conn_req, &conn_resp);
if (ret) {
ath11k_err(ab, "could not connect to htc service (%d)\n", ret);
return ret;
}
init_completion(&htc->ctl_resp);
return 0;
}
|
linux-master
|
drivers/net/wireless/ath/ath11k/htc.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/dma-mapping.h>
#include <linux/of_address.h>
#include <linux/iommu.h>
#include "ahb.h"
#include "debug.h"
#include "hif.h"
#include "qmi.h"
#include <linux/remoteproc.h>
#include "pcic.h"
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
static const struct of_device_id ath11k_ahb_of_match[] = {
/* TODO: Should we change the compatible string to something similar
* to one that ath10k uses?
*/
{ .compatible = "qcom,ipq8074-wifi",
.data = (void *)ATH11K_HW_IPQ8074,
},
{ .compatible = "qcom,ipq6018-wifi",
.data = (void *)ATH11K_HW_IPQ6018_HW10,
},
{ .compatible = "qcom,wcn6750-wifi",
.data = (void *)ATH11K_HW_WCN6750_HW10,
},
{ .compatible = "qcom,ipq5018-wifi",
.data = (void *)ATH11K_HW_IPQ5018_HW10,
},
{ }
};
MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);
#define ATH11K_IRQ_CE0_OFFSET 4
static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
"misc-pulse1",
"misc-latch",
"sw-exception",
"watchdog",
"ce0",
"ce1",
"ce2",
"ce3",
"ce4",
"ce5",
"ce6",
"ce7",
"ce8",
"ce9",
"ce10",
"ce11",
"host2wbm-desc-feed",
"host2reo-re-injection",
"host2reo-command",
"host2rxdma-monitor-ring3",
"host2rxdma-monitor-ring2",
"host2rxdma-monitor-ring1",
"reo2ost-exception",
"wbm2host-rx-release",
"reo2host-status",
"reo2host-destination-ring4",
"reo2host-destination-ring3",
"reo2host-destination-ring2",
"reo2host-destination-ring1",
"rxdma2host-monitor-destination-mac3",
"rxdma2host-monitor-destination-mac2",
"rxdma2host-monitor-destination-mac1",
"ppdu-end-interrupts-mac3",
"ppdu-end-interrupts-mac2",
"ppdu-end-interrupts-mac1",
"rxdma2host-monitor-status-ring-mac3",
"rxdma2host-monitor-status-ring-mac2",
"rxdma2host-monitor-status-ring-mac1",
"host2rxdma-host-buf-ring-mac3",
"host2rxdma-host-buf-ring-mac2",
"host2rxdma-host-buf-ring-mac1",
"rxdma2host-destination-ring-mac3",
"rxdma2host-destination-ring-mac2",
"rxdma2host-destination-ring-mac1",
"host2tcl-input-ring4",
"host2tcl-input-ring3",
"host2tcl-input-ring2",
"host2tcl-input-ring1",
"wbm2host-tx-completions-ring3",
"wbm2host-tx-completions-ring2",
"wbm2host-tx-completions-ring1",
"tcl2host-status-ring",
};
/* enum ext_irq_num - irq numbers that can be used by external modules
* like datapath
*/
enum ext_irq_num {
host2wbm_desc_feed = 16,
host2reo_re_injection,
host2reo_command,
host2rxdma_monitor_ring3,
host2rxdma_monitor_ring2,
host2rxdma_monitor_ring1,
reo2host_exception,
wbm2host_rx_release,
reo2host_status,
reo2host_destination_ring4,
reo2host_destination_ring3,
reo2host_destination_ring2,
reo2host_destination_ring1,
rxdma2host_monitor_destination_mac3,
rxdma2host_monitor_destination_mac2,
rxdma2host_monitor_destination_mac1,
ppdu_end_interrupts_mac3,
ppdu_end_interrupts_mac2,
ppdu_end_interrupts_mac1,
rxdma2host_monitor_status_ring_mac3,
rxdma2host_monitor_status_ring_mac2,
rxdma2host_monitor_status_ring_mac1,
host2rxdma_host_buf_ring_mac3,
host2rxdma_host_buf_ring_mac2,
host2rxdma_host_buf_ring_mac1,
rxdma2host_destination_ring_mac3,
rxdma2host_destination_ring_mac2,
rxdma2host_destination_ring_mac1,
host2tcl_input_ring4,
host2tcl_input_ring3,
host2tcl_input_ring2,
host2tcl_input_ring1,
wbm2host_tx_completions_ring3,
wbm2host_tx_completions_ring2,
wbm2host_tx_completions_ring1,
tcl2host_status_ring,
};
static int
ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector)
{
return ab->pci.msi.irqs[vector];
}
static inline u32
ath11k_ahb_get_window_start_wcn6750(struct ath11k_base *ab, u32 offset)
{
u32 window_start = 0;
/* If offset lies within DP register range, use 1st window */
if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
window_start = ATH11K_PCI_WINDOW_START;
/* If offset lies within CE register range, use 2nd window */
else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
ATH11K_PCI_WINDOW_RANGE_MASK)
window_start = 2 * ATH11K_PCI_WINDOW_START;
return window_start;
}
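/* Worked example for the XOR range check above (illustrative): an offset
 * that lies inside a window agrees with the window base in every bit above
 * the range mask, so (offset ^ base) reduces to the offset-within-window
 * bits and compares below ATH11K_PCI_WINDOW_RANGE_MASK; an offset outside
 * the window keeps at least one high bit set and fails the comparison.
 */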
static void
ath11k_ahb_window_write32_wcn6750(struct ath11k_base *ab, u32 offset, u32 value)
{
u32 window_start;
/* WCN6750 uses static window based register access */
window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
iowrite32(value, ab->mem + window_start +
(offset & ATH11K_PCI_WINDOW_RANGE_MASK));
}
static u32 ath11k_ahb_window_read32_wcn6750(struct ath11k_base *ab, u32 offset)
{
u32 window_start;
u32 val;
/* WCN6750 uses static window based register access */
window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
val = ioread32(ab->mem + window_start +
(offset & ATH11K_PCI_WINDOW_RANGE_MASK));
return val;
}
static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = {
.wakeup = NULL,
.release = NULL,
.get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750,
.window_write32 = ath11k_ahb_window_write32_wcn6750,
.window_read32 = ath11k_ahb_window_read32_wcn6750,
};
static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
{
return ioread32(ab->mem + offset);
}
static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
iowrite32(value, ab->mem + offset);
}
static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab)
{
int i;
for (i = 0; i < ab->hw_params.ce_count; i++) {
struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
tasklet_kill(&ce_pipe->intr_tq);
}
}
static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
{
int i;
for (i = 0; i < irq_grp->num_irq; i++)
disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
{
int i;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
ath11k_ahb_ext_grp_disable(irq_grp);
if (irq_grp->napi_enabled) {
napi_synchronize(&irq_grp->napi);
napi_disable(&irq_grp->napi);
irq_grp->napi_enabled = false;
}
}
}
static void ath11k_ahb_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
{
int i;
for (i = 0; i < irq_grp->num_irq; i++)
enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
static void ath11k_ahb_setbit32(struct ath11k_base *ab, u8 bit, u32 offset)
{
u32 val;
val = ath11k_ahb_read32(ab, offset);
ath11k_ahb_write32(ab, offset, val | BIT(bit));
}
static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
{
u32 val;
val = ath11k_ahb_read32(ab, offset);
ath11k_ahb_write32(ab, offset, val & ~BIT(bit));
}
static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
const struct ce_attr *ce_attr;
const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
ce_attr = &ab->hw_params.host_ce_config[ce_id];
if (ce_attr->src_nentries)
ath11k_ahb_setbit32(ab, ce_id, ie1_reg_addr);
if (ce_attr->dest_nentries) {
ath11k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
ie3_reg_addr);
}
}
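/* Summary of the interrupt-enable registers used above, inferred from how
 * the bits are set here rather than from a datasheet: IE1 gates the source
 * (host-to-target) ring interrupts, IE2 the destination ring interrupts,
 * and IE3 the per-CE destination status bits, which sit CE_HOST_IE_3_SHIFT
 * positions above the CE id.
 */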
static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
const struct ce_attr *ce_attr;
const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
ce_attr = &ab->hw_params.host_ce_config[ce_id];
if (ce_attr->src_nentries)
ath11k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);
if (ce_attr->dest_nentries) {
ath11k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
ie3_reg_addr);
}
}
static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab)
{
int i;
int irq_idx;
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
synchronize_irq(ab->irq_num[irq_idx]);
}
}
static void ath11k_ahb_sync_ext_irqs(struct ath11k_base *ab)
{
int i, j;
int irq_idx;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
for (j = 0; j < irq_grp->num_irq; j++) {
irq_idx = irq_grp->irqs[j];
synchronize_irq(ab->irq_num[irq_idx]);
}
}
}
static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab)
{
int i;
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
ath11k_ahb_ce_irq_enable(ab, i);
}
}
static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab)
{
int i;
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
ath11k_ahb_ce_irq_disable(ab, i);
}
}
static int ath11k_ahb_start(struct ath11k_base *ab)
{
ath11k_ahb_ce_irqs_enable(ab);
ath11k_ce_rx_post_buf(ab);
return 0;
}
static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
{
int i;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
if (!irq_grp->napi_enabled) {
napi_enable(&irq_grp->napi);
irq_grp->napi_enabled = true;
}
ath11k_ahb_ext_grp_enable(irq_grp);
}
}
static void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
{
__ath11k_ahb_ext_irq_disable(ab);
ath11k_ahb_sync_ext_irqs(ab);
}
static void ath11k_ahb_stop(struct ath11k_base *ab)
{
if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
ath11k_ahb_ce_irqs_disable(ab);
ath11k_ahb_sync_ce_irqs(ab);
ath11k_ahb_kill_tasklets(ab);
del_timer_sync(&ab->rx_replenish_retry);
ath11k_ce_cleanup_pipes(ab);
}
static int ath11k_ahb_power_up(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
int ret;
ret = rproc_boot(ab_ahb->tgt_rproc);
if (ret)
ath11k_err(ab, "failed to boot the remote processor Q6\n");
return ret;
}
static void ath11k_ahb_power_down(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
rproc_shutdown(ab_ahb->tgt_rproc);
}
static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
{
struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
cfg->tgt_ce_len = ab->hw_params.target_ce_count;
cfg->tgt_ce = ab->hw_params.target_ce_config;
cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id;
}
static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
{
int i, j;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
for (j = 0; j < irq_grp->num_irq; j++)
free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
netif_napi_del(&irq_grp->napi);
}
}
static void ath11k_ahb_free_irq(struct ath11k_base *ab)
{
int irq_idx;
int i;
if (ab->hw_params.hybrid_bus_type)
return ath11k_pcic_free_irq(ab);
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
}
ath11k_ahb_free_ext_irq(ab);
}
static void ath11k_ahb_ce_tasklet(struct tasklet_struct *t)
{
struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
ath11k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
}
static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg)
{
struct ath11k_ce_pipe *ce_pipe = arg;
/* last interrupt received for this CE */
ce_pipe->timestamp = jiffies;
ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
tasklet_schedule(&ce_pipe->intr_tq);
return IRQ_HANDLED;
}
static int ath11k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
struct ath11k_ext_irq_grp,
napi);
struct ath11k_base *ab = irq_grp->ab;
int work_done;
work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
ath11k_ahb_ext_grp_enable(irq_grp);
}
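/* NAPI requires the poll return value to be at most the budget; clamp it
 * in case the service routine reports more completed work than it was
 * given credit for.
 */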
if (work_done > budget)
work_done = budget;
return work_done;
}
static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
{
struct ath11k_ext_irq_grp *irq_grp = arg;
/* last interrupt received for this group */
irq_grp->timestamp = jiffies;
ath11k_ahb_ext_grp_disable(irq_grp);
napi_schedule(&irq_grp->napi);
return IRQ_HANDLED;
}
static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
{
struct ath11k_hw_params *hw = &ab->hw_params;
int i, j;
int irq;
int ret;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
u32 num_irq = 0;
irq_grp->ab = ab;
irq_grp->grp_id = i;
init_dummy_netdev(&irq_grp->napi_ndev);
netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
ath11k_ahb_ext_grp_napi_poll);
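/* Each ext IRQ group aggregates the ring interrupts enabled in its hw ring
 * masks. The ext_irq_num enum counts down from the "ring1" entry, so for
 * example (sketch) ring_mask->tx[i] & BIT(2) selects
 * wbm2host_tx_completions_ring1 - 2, i.e. tx completion ring 3.
 */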
for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
if (ab->hw_params.ring_mask->tx[i] & BIT(j)) {
irq_grp->irqs[num_irq++] =
wbm2host_tx_completions_ring1 - j;
}
if (ab->hw_params.ring_mask->rx[i] & BIT(j)) {
irq_grp->irqs[num_irq++] =
reo2host_destination_ring1 - j;
}
if (ab->hw_params.ring_mask->rx_err[i] & BIT(j))
irq_grp->irqs[num_irq++] = reo2host_exception;
if (ab->hw_params.ring_mask->rx_wbm_rel[i] & BIT(j))
irq_grp->irqs[num_irq++] = wbm2host_rx_release;
if (ab->hw_params.ring_mask->reo_status[i] & BIT(j))
irq_grp->irqs[num_irq++] = reo2host_status;
if (j < ab->hw_params.max_radios) {
if (ab->hw_params.ring_mask->rxdma2host[i] & BIT(j)) {
irq_grp->irqs[num_irq++] =
rxdma2host_destination_ring_mac1 -
ath11k_hw_get_mac_from_pdev_id(hw, j);
}
if (ab->hw_params.ring_mask->host2rxdma[i] & BIT(j)) {
irq_grp->irqs[num_irq++] =
host2rxdma_host_buf_ring_mac1 -
ath11k_hw_get_mac_from_pdev_id(hw, j);
}
if (ab->hw_params.ring_mask->rx_mon_status[i] & BIT(j)) {
irq_grp->irqs[num_irq++] =
ppdu_end_interrupts_mac1 -
ath11k_hw_get_mac_from_pdev_id(hw, j);
irq_grp->irqs[num_irq++] =
rxdma2host_monitor_status_ring_mac1 -
ath11k_hw_get_mac_from_pdev_id(hw, j);
}
}
}
irq_grp->num_irq = num_irq;
for (j = 0; j < irq_grp->num_irq; j++) {
int irq_idx = irq_grp->irqs[j];
irq = platform_get_irq_byname(ab->pdev,
irq_name[irq_idx]);
ab->irq_num[irq_idx] = irq;
irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler,
IRQF_TRIGGER_RISING,
irq_name[irq_idx], irq_grp);
if (ret) {
ath11k_err(ab, "failed request_irq for %d\n",
irq);
}
}
}
return 0;
}
static int ath11k_ahb_config_irq(struct ath11k_base *ab)
{
int irq, irq_idx, i;
int ret;
if (ab->hw_params.hybrid_bus_type)
return ath11k_pcic_config_irq(ab);
/* Configure CE irqs */
for (i = 0; i < ab->hw_params.ce_count; i++) {
struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
tasklet_setup(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet);
irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler,
IRQF_TRIGGER_RISING, irq_name[irq_idx],
ce_pipe);
if (ret)
return ret;
ab->irq_num[irq_idx] = irq;
}
/* Configure external interrupts */
ret = ath11k_ahb_config_ext_irq(ab);
return ret;
}
static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe)
{
const struct service_to_pipe *entry;
bool ul_set = false, dl_set = false;
int i;
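/* Each svc_to_ce_map entry pins one service to a copy-engine pipe per
 * direction: PIPEDIR_OUT fills the host-to-target (ul) pipe, PIPEDIR_IN
 * fills the target-to-host (dl) pipe, and PIPEDIR_INOUT fills both. As an
 * illustration (hypothetical entry), pipedir PIPEDIR_OUT with pipenum 3
 * would set *ul_pipe = 3 and leave the dl pipe to a matching PIPEDIR_IN
 * entry.
 */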
for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
entry = &ab->hw_params.svc_to_ce_map[i];
if (__le32_to_cpu(entry->service_id) != service_id)
continue;
switch (__le32_to_cpu(entry->pipedir)) {
case PIPEDIR_NONE:
break;
case PIPEDIR_IN:
WARN_ON(dl_set);
*dl_pipe = __le32_to_cpu(entry->pipenum);
dl_set = true;
break;
case PIPEDIR_OUT:
WARN_ON(ul_set);
*ul_pipe = __le32_to_cpu(entry->pipenum);
ul_set = true;
break;
case PIPEDIR_INOUT:
WARN_ON(dl_set);
WARN_ON(ul_set);
*dl_pipe = __le32_to_cpu(entry->pipenum);
*ul_pipe = __le32_to_cpu(entry->pipenum);
dl_set = true;
ul_set = true;
break;
}
}
if (WARN_ON(!ul_set || !dl_set))
return -ENOENT;
return 0;
}
static int ath11k_ahb_hif_suspend(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
u32 wake_irq;
u32 value = 0;
int ret;
if (!device_may_wakeup(ab->dev))
return -EPERM;
wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
ret = enable_irq_wake(wake_irq);
if (ret) {
ath11k_err(ab, "failed to enable wakeup irq :%d\n", ret);
return ret;
}
value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_ENTER,
ATH11K_AHB_SMP2P_SMEM_MSG);
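/* Sketch of the resulting smp2p word (illustrative values): with seq_no 5,
 * the sequence number and the ENTER message code are packed into disjoint
 * bit fields of a single u32, so the remote side can both detect a fresh
 * notification (the sequence number changed) and decode the requested
 * power-save state.
 */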
ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
if (ret) {
ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
return ret;
}
ath11k_dbg(ab, ATH11K_DBG_AHB, "device suspended\n");
return ret;
}
static int ath11k_ahb_hif_resume(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
u32 wake_irq;
u32 value = 0;
int ret;
if (!device_may_wakeup(ab->dev))
return -EPERM;
wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
ret = disable_irq_wake(wake_irq);
if (ret) {
ath11k_err(ab, "failed to disable wakeup irq: %d\n", ret);
return ret;
}
reinit_completion(&ab->wow.wakeup_completed);
value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_EXIT,
ATH11K_AHB_SMP2P_SMEM_MSG);
ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
if (ret) {
ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
return ret;
}
ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
if (ret == 0) {
ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
return -ETIMEDOUT;
}
ath11k_dbg(ab, ATH11K_DBG_AHB, "device resumed\n");
return 0;
}
static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = {
.start = ath11k_ahb_start,
.stop = ath11k_ahb_stop,
.read32 = ath11k_ahb_read32,
.write32 = ath11k_ahb_write32,
.read = NULL,
.irq_enable = ath11k_ahb_ext_irq_enable,
.irq_disable = ath11k_ahb_ext_irq_disable,
.map_service_to_pipe = ath11k_ahb_map_service_to_pipe,
.power_down = ath11k_ahb_power_down,
.power_up = ath11k_ahb_power_up,
};
static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = {
.start = ath11k_pcic_start,
.stop = ath11k_pcic_stop,
.read32 = ath11k_pcic_read32,
.write32 = ath11k_pcic_write32,
.read = NULL,
.irq_enable = ath11k_pcic_ext_irq_enable,
.irq_disable = ath11k_pcic_ext_irq_disable,
.get_msi_address = ath11k_pcic_get_msi_address,
.get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
.map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
.power_down = ath11k_ahb_power_down,
.power_up = ath11k_ahb_power_up,
.suspend = ath11k_ahb_hif_suspend,
.resume = ath11k_ahb_hif_resume,
.ce_irq_enable = ath11k_pci_enable_ce_irqs_except_wake_irq,
.ce_irq_disable = ath11k_pci_disable_ce_irqs_except_wake_irq,
};
static int ath11k_core_get_rproc(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
struct device *dev = ab->dev;
struct rproc *prproc;
phandle rproc_phandle;
if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
ath11k_err(ab, "failed to get q6_rproc handle\n");
return -ENOENT;
}
prproc = rproc_get_by_phandle(rproc_phandle);
if (!prproc) {
ath11k_err(ab, "failed to get rproc\n");
return -EINVAL;
}
ab_ahb->tgt_rproc = prproc;
return 0;
}
static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
{
struct platform_device *pdev = ab->pdev;
phys_addr_t msi_addr_pa;
dma_addr_t msi_addr_iova;
struct resource *res;
int int_prop;
int ret;
int i;
ret = ath11k_pcic_init_msi_config(ab);
if (ret) {
ath11k_err(ab, "failed to init msi config: %d\n", ret);
return ret;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ath11k_err(ab, "failed to fetch msi_addr\n");
return -ENOENT;
}
msi_addr_pa = res->start;
msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE,
DMA_FROM_DEVICE, 0);
if (dma_mapping_error(ab->dev, msi_addr_iova))
return -ENOMEM;
ab->pci.msi.addr_lo = lower_32_bits(msi_addr_iova);
ab->pci.msi.addr_hi = upper_32_bits(msi_addr_iova);
ret = of_property_read_u32_index(ab->dev->of_node, "interrupts", 1, &int_prop);
if (ret)
return ret;
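/* The second "interrupts" cell holds the GIC SPI number; the +32 below
 * converts it to the absolute hardware interrupt number used as the
 * endpoint's base MSI data. The SPI-to-hwirq offset of 32 is an assumption
 * about the interrupt controller, not something spelled out here.
 */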
ab->pci.msi.ep_base_data = int_prop + 32;
for (i = 0; i < ab->pci.msi.config->total_vectors; i++) {
ret = platform_get_irq(pdev, i);
if (ret < 0)
return ret;
ab->pci.msi.irqs[i] = ret;
}
set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
return 0;
}
static int ath11k_ahb_setup_smp2p_handle(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
if (!ab->hw_params.smp2p_wow_exit)
return 0;
ab_ahb->smp2p_info.smem_state = qcom_smem_state_get(ab->dev, "wlan-smp2p-out",
&ab_ahb->smp2p_info.smem_bit);
if (IS_ERR(ab_ahb->smp2p_info.smem_state)) {
ath11k_err(ab, "failed to fetch smem state: %ld\n",
PTR_ERR(ab_ahb->smp2p_info.smem_state));
return PTR_ERR(ab_ahb->smp2p_info.smem_state);
}
return 0;
}
static void ath11k_ahb_release_smp2p_handle(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
if (!ab->hw_params.smp2p_wow_exit)
return;
qcom_smem_state_put(ab_ahb->smp2p_info.smem_state);
}
static int ath11k_ahb_setup_resources(struct ath11k_base *ab)
{
struct platform_device *pdev = ab->pdev;
struct resource *mem_res;
void __iomem *mem;
if (ab->hw_params.hybrid_bus_type)
return ath11k_ahb_setup_msi_resources(ab);
mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
if (IS_ERR(mem)) {
dev_err(&pdev->dev, "ioremap error\n");
return PTR_ERR(mem);
}
ab->mem = mem;
ab->mem_len = resource_size(mem_res);
return 0;
}
static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
struct device *dev = ab->dev;
struct device_node *node;
struct resource r;
int ret;
node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (!node)
return -ENOENT;
ret = of_address_to_resource(node, 0, &r);
of_node_put(node);
if (ret) {
dev_err(dev, "failed to resolve msa fixed region\n");
return ret;
}
ab_ahb->fw.msa_paddr = r.start;
ab_ahb->fw.msa_size = resource_size(&r);
node = of_parse_phandle(dev->of_node, "memory-region", 1);
if (!node)
return -ENOENT;
ret = of_address_to_resource(node, 0, &r);
of_node_put(node);
if (ret) {
dev_err(dev, "failed to resolve ce fixed region\n");
return ret;
}
ab_ahb->fw.ce_paddr = r.start;
ab_ahb->fw.ce_size = resource_size(&r);
return 0;
}
static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
struct device *host_dev = ab->dev;
struct platform_device_info info = {0};
struct iommu_domain *iommu_dom;
struct platform_device *pdev;
struct device_node *node;
int ret;
/* Chipsets not requiring MSA need not initialize
 * MSA resources; return success in such cases.
 */
if (!ab->hw_params.fixed_fw_mem)
return 0;
ret = ath11k_ahb_setup_msa_resources(ab);
if (ret) {
ath11k_err(ab, "failed to setup msa resources\n");
return ret;
}
node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
if (!node) {
ab_ahb->fw.use_tz = true;
return 0;
}
info.fwnode = &node->fwnode;
info.parent = host_dev;
info.name = node->name;
info.dma_mask = DMA_BIT_MASK(32);
pdev = platform_device_register_full(&info);
if (IS_ERR(pdev)) {
of_node_put(node);
return PTR_ERR(pdev);
}
ret = of_dma_configure(&pdev->dev, node, true);
if (ret) {
ath11k_err(ab, "dma configure fail: %d\n", ret);
goto err_unregister;
}
ab_ahb->fw.dev = &pdev->dev;
iommu_dom = iommu_domain_alloc(&platform_bus_type);
if (!iommu_dom) {
ath11k_err(ab, "failed to allocate iommu domain\n");
ret = -ENOMEM;
goto err_unregister;
}
ret = iommu_attach_device(iommu_dom, ab_ahb->fw.dev);
if (ret) {
ath11k_err(ab, "could not attach device: %d\n", ret);
goto err_iommu_free;
}
ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
if (ret) {
ath11k_err(ab, "failed to map firmware region: %d\n", ret);
goto err_iommu_detach;
}
ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
if (ret) {
ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
goto err_iommu_unmap;
}
ab_ahb->fw.use_tz = false;
ab_ahb->fw.iommu_domain = iommu_dom;
of_node_put(node);
return 0;
err_iommu_unmap:
iommu_unmap(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
err_iommu_detach:
iommu_detach_device(iommu_dom, ab_ahb->fw.dev);
err_iommu_free:
iommu_domain_free(iommu_dom);
err_unregister:
platform_device_unregister(pdev);
of_node_put(node);
return ret;
}
static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
struct iommu_domain *iommu;
size_t unmapped_size;
/* Chipsets not requiring MSA will not have initialized
 * MSA resources; return success in such cases.
 */
if (!ab->hw_params.fixed_fw_mem)
return 0;
if (ab_ahb->fw.use_tz)
return 0;
iommu = ab_ahb->fw.iommu_domain;
unmapped_size = iommu_unmap(iommu, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
if (unmapped_size != ab_ahb->fw.msa_size)
ath11k_err(ab, "failed to unmap firmware: %zu\n",
unmapped_size);
unmapped_size = iommu_unmap(iommu, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size);
if (unmapped_size != ab_ahb->fw.ce_size)
ath11k_err(ab, "failed to unmap firmware CE memory: %zu\n",
unmapped_size);
iommu_detach_device(iommu, ab_ahb->fw.dev);
iommu_domain_free(iommu);
platform_device_unregister(to_platform_device(ab_ahb->fw.dev));
return 0;
}
static int ath11k_ahb_probe(struct platform_device *pdev)
{
struct ath11k_base *ab;
const struct of_device_id *of_id;
const struct ath11k_hif_ops *hif_ops;
const struct ath11k_pci_ops *pci_ops;
enum ath11k_hw_rev hw_rev;
int ret;
of_id = of_match_device(ath11k_ahb_of_match, &pdev->dev);
if (!of_id) {
dev_err(&pdev->dev, "failed to find matching device tree id\n");
return -EINVAL;
}
hw_rev = (uintptr_t)of_id->data;
switch (hw_rev) {
case ATH11K_HW_IPQ8074:
case ATH11K_HW_IPQ6018_HW10:
case ATH11K_HW_IPQ5018_HW10:
hif_ops = &ath11k_ahb_hif_ops_ipq8074;
pci_ops = NULL;
break;
case ATH11K_HW_WCN6750_HW10:
hif_ops = &ath11k_ahb_hif_ops_wcn6750;
pci_ops = &ath11k_ahb_pci_ops_wcn6750;
break;
default:
dev_err(&pdev->dev, "unsupported device type %d\n", hw_rev);
return -EOPNOTSUPP;
}
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev, "failed to set 32-bit consistent dma\n");
return ret;
}
ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb),
ATH11K_BUS_AHB);
if (!ab) {
dev_err(&pdev->dev, "failed to allocate ath11k base\n");
return -ENOMEM;
}
ab->hif.ops = hif_ops;
ab->pdev = pdev;
ab->hw_rev = hw_rev;
ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
platform_set_drvdata(pdev, ab);
ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
if (ret) {
ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
goto err_core_free;
}
ret = ath11k_core_pre_init(ab);
if (ret)
goto err_core_free;
ret = ath11k_ahb_setup_resources(ab);
if (ret)
goto err_core_free;
ab->mem_ce = ab->mem;
if (ab->hw_params.ce_remap) {
const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
/* The CE register space is moved out of WCSS, unlike on ipq8074 or
 * ipq6018, and is not contiguous with it, hence the CE registers are
 * remapped to a separate region for access.
 */
ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
if (!ab->mem_ce) {
dev_err(&pdev->dev, "ce ioremap error\n");
ret = -ENOMEM;
goto err_core_free;
}
}
ret = ath11k_ahb_fw_resources_init(ab);
if (ret)
goto err_core_free;
ret = ath11k_ahb_setup_smp2p_handle(ab);
if (ret)
goto err_fw_deinit;
ret = ath11k_hal_srng_init(ab);
if (ret)
goto err_release_smp2p_handle;
ret = ath11k_ce_alloc_pipes(ab);
if (ret) {
ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
goto err_hal_srng_deinit;
}
ath11k_ahb_init_qmi_ce_config(ab);
ret = ath11k_core_get_rproc(ab);
if (ret) {
ath11k_err(ab, "failed to get rproc: %d\n", ret);
goto err_ce_free;
}
ret = ath11k_core_init(ab);
if (ret) {
ath11k_err(ab, "failed to init core: %d\n", ret);
goto err_ce_free;
}
ret = ath11k_ahb_config_irq(ab);
if (ret) {
ath11k_err(ab, "failed to configure irq: %d\n", ret);
goto err_ce_free;
}
ath11k_qmi_fwreset_from_cold_boot(ab);
return 0;
err_ce_free:
ath11k_ce_free_pipes(ab);
err_hal_srng_deinit:
ath11k_hal_srng_deinit(ab);
err_release_smp2p_handle:
ath11k_ahb_release_smp2p_handle(ab);
err_fw_deinit:
ath11k_ahb_fw_resource_deinit(ab);
err_core_free:
ath11k_core_free(ab);
platform_set_drvdata(pdev, NULL);
return ret;
}
static void ath11k_ahb_remove_prepare(struct ath11k_base *ab)
{
unsigned long left;
if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
left = wait_for_completion_timeout(&ab->driver_recovery,
ATH11K_AHB_RECOVERY_TIMEOUT);
if (!left)
ath11k_warn(ab, "failed to receive recovery response completion\n");
}
set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
cancel_work_sync(&ab->restart_work);
cancel_work_sync(&ab->qmi.event_work);
}
static void ath11k_ahb_free_resources(struct ath11k_base *ab)
{
struct platform_device *pdev = ab->pdev;
ath11k_ahb_free_irq(ab);
ath11k_hal_srng_deinit(ab);
ath11k_ahb_release_smp2p_handle(ab);
ath11k_ahb_fw_resource_deinit(ab);
ath11k_ce_free_pipes(ab);
if (ab->hw_params.ce_remap)
iounmap(ab->mem_ce);
ath11k_core_free(ab);
platform_set_drvdata(pdev, NULL);
}
static int ath11k_ahb_remove(struct platform_device *pdev)
{
struct ath11k_base *ab = platform_get_drvdata(pdev);
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
ath11k_ahb_power_down(ab);
ath11k_debugfs_soc_destroy(ab);
ath11k_qmi_deinit_service(ab);
goto qmi_fail;
}
ath11k_ahb_remove_prepare(ab);
ath11k_core_deinit(ab);
qmi_fail:
ath11k_ahb_free_resources(ab);
return 0;
}
static void ath11k_ahb_shutdown(struct platform_device *pdev)
{
struct ath11k_base *ab = platform_get_drvdata(pdev);
/* platform shutdown() & remove() are mutually exclusive.
* remove() is invoked during rmmod & shutdown() during
* system reboot/shutdown.
*/
ath11k_ahb_remove_prepare(ab);
if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)))
goto free_resources;
ath11k_core_deinit(ab);
free_resources:
ath11k_ahb_free_resources(ab);
}
static struct platform_driver ath11k_ahb_driver = {
.driver = {
.name = "ath11k",
.of_match_table = ath11k_ahb_of_match,
},
.probe = ath11k_ahb_probe,
.remove = ath11k_ahb_remove,
.shutdown = ath11k_ahb_shutdown,
};
module_platform_driver(ath11k_ahb_driver);
MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN AHB devices");
MODULE_LICENSE("Dual BSD/GPL");
|
linux-master
|
drivers/net/wireless/ath/ath11k/ahb.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/ioport.h>
#include "core.h"
#include "debug.h"
#include "mhi.h"
#include "pci.h"
#include "pcic.h"
#define MHI_TIMEOUT_DEFAULT_MS 20000
#define RDDM_DUMP_SIZE 0x420000
static struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
{
.num = 0,
.name = "LOOPBACK",
.num_elements = 32,
.event_ring = 0,
.dir = DMA_TO_DEVICE,
.ee_mask = 0x4,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
},
{
.num = 1,
.name = "LOOPBACK",
.num_elements = 32,
.event_ring = 0,
.dir = DMA_FROM_DEVICE,
.ee_mask = 0x4,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
},
{
.num = 20,
.name = "IPCR",
.num_elements = 64,
.event_ring = 1,
.dir = DMA_TO_DEVICE,
.ee_mask = 0x4,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
},
{
.num = 21,
.name = "IPCR",
.num_elements = 64,
.event_ring = 1,
.dir = DMA_FROM_DEVICE,
.ee_mask = 0x4,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = true,
},
};
static struct mhi_event_config ath11k_mhi_events_qca6390[] = {
{
.num_elements = 32,
.irq_moderation_ms = 0,
.irq = 1,
.mode = MHI_DB_BRST_DISABLE,
.data_type = MHI_ER_CTRL,
.hardware_event = false,
.client_managed = false,
.offload_channel = false,
},
{
.num_elements = 256,
.irq_moderation_ms = 1,
.irq = 2,
.mode = MHI_DB_BRST_DISABLE,
.priority = 1,
.hardware_event = false,
.client_managed = false,
.offload_channel = false,
},
};
static struct mhi_controller_config ath11k_mhi_config_qca6390 = {
.max_channels = 128,
.timeout_ms = 2000,
.use_bounce_buf = false,
.buf_len = 0,
.num_channels = ARRAY_SIZE(ath11k_mhi_channels_qca6390),
.ch_cfg = ath11k_mhi_channels_qca6390,
.num_events = ARRAY_SIZE(ath11k_mhi_events_qca6390),
.event_cfg = ath11k_mhi_events_qca6390,
};
static struct mhi_channel_config ath11k_mhi_channels_qcn9074[] = {
{
.num = 0,
.name = "LOOPBACK",
.num_elements = 32,
.event_ring = 1,
.dir = DMA_TO_DEVICE,
.ee_mask = 0x14,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
},
{
.num = 1,
.name = "LOOPBACK",
.num_elements = 32,
.event_ring = 1,
.dir = DMA_FROM_DEVICE,
.ee_mask = 0x14,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
},
{
.num = 20,
.name = "IPCR",
.num_elements = 32,
.event_ring = 1,
.dir = DMA_TO_DEVICE,
.ee_mask = 0x14,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
},
{
.num = 21,
.name = "IPCR",
.num_elements = 32,
.event_ring = 1,
.dir = DMA_FROM_DEVICE,
.ee_mask = 0x14,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = true,
},
};
static struct mhi_event_config ath11k_mhi_events_qcn9074[] = {
{
.num_elements = 32,
.irq_moderation_ms = 0,
.irq = 1,
.data_type = MHI_ER_CTRL,
.mode = MHI_DB_BRST_DISABLE,
.hardware_event = false,
.client_managed = false,
.offload_channel = false,
},
{
.num_elements = 256,
.irq_moderation_ms = 1,
.irq = 2,
.mode = MHI_DB_BRST_DISABLE,
.priority = 1,
.hardware_event = false,
.client_managed = false,
.offload_channel = false,
},
};
static struct mhi_controller_config ath11k_mhi_config_qcn9074 = {
.max_channels = 30,
.timeout_ms = 10000,
.use_bounce_buf = false,
.buf_len = 0,
.num_channels = ARRAY_SIZE(ath11k_mhi_channels_qcn9074),
.ch_cfg = ath11k_mhi_channels_qcn9074,
.num_events = ARRAY_SIZE(ath11k_mhi_events_qcn9074),
.event_cfg = ath11k_mhi_events_qcn9074,
};
void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab)
{
u32 val;
val = ath11k_pcic_read32(ab, MHISTATUS);
ath11k_dbg(ab, ATH11K_DBG_PCI, "mhistatus 0x%x\n", val);
/* Observed on QCA6390 that after SOC_GLOBAL_RESET, MHISTATUS
 * has the SYSERR bit set, so MHICTRL_RESET must be set
 * to clear SYSERR.
 */
ath11k_pcic_write32(ab, MHICTRL, MHICTRL_RESET_MASK);
mdelay(10);
}
static void ath11k_mhi_reset_txvecdb(struct ath11k_base *ab)
{
ath11k_pcic_write32(ab, PCIE_TXVECDB, 0);
}
static void ath11k_mhi_reset_txvecstatus(struct ath11k_base *ab)
{
ath11k_pcic_write32(ab, PCIE_TXVECSTATUS, 0);
}
static void ath11k_mhi_reset_rxvecdb(struct ath11k_base *ab)
{
ath11k_pcic_write32(ab, PCIE_RXVECDB, 0);
}
static void ath11k_mhi_reset_rxvecstatus(struct ath11k_base *ab)
{
ath11k_pcic_write32(ab, PCIE_RXVECSTATUS, 0);
}
void ath11k_mhi_clear_vector(struct ath11k_base *ab)
{
ath11k_mhi_reset_txvecdb(ab);
ath11k_mhi_reset_txvecstatus(ab);
ath11k_mhi_reset_rxvecdb(ab);
ath11k_mhi_reset_rxvecstatus(ab);
}
static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
u32 user_base_data, base_vector;
int ret, num_vectors, i;
int *irq;
unsigned int msi_data;
ret = ath11k_pcic_get_user_msi_assignment(ab, "MHI", &num_vectors,
&user_base_data, &base_vector);
if (ret)
return ret;
ath11k_dbg(ab, ATH11K_DBG_PCI, "num_vectors %d base_vector %d\n",
num_vectors, base_vector);
irq = kcalloc(num_vectors, sizeof(int), GFP_KERNEL);
if (!irq)
return -ENOMEM;
for (i = 0; i < num_vectors; i++) {
msi_data = base_vector;
if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
msi_data += i;
irq[i] = ath11k_pci_get_msi_irq(ab, msi_data);
}
ab_pci->mhi_ctrl->irq = irq;
ab_pci->mhi_ctrl->nr_irqs = num_vectors;
return 0;
}
static int ath11k_mhi_op_runtime_get(struct mhi_controller *mhi_cntrl)
{
return 0;
}
static void ath11k_mhi_op_runtime_put(struct mhi_controller *mhi_cntrl)
{
}
static char *ath11k_mhi_op_callback_to_str(enum mhi_callback reason)
{
switch (reason) {
case MHI_CB_IDLE:
return "MHI_CB_IDLE";
case MHI_CB_PENDING_DATA:
return "MHI_CB_PENDING_DATA";
case MHI_CB_LPM_ENTER:
return "MHI_CB_LPM_ENTER";
case MHI_CB_LPM_EXIT:
return "MHI_CB_LPM_EXIT";
case MHI_CB_EE_RDDM:
return "MHI_CB_EE_RDDM";
case MHI_CB_EE_MISSION_MODE:
return "MHI_CB_EE_MISSION_MODE";
case MHI_CB_SYS_ERROR:
return "MHI_CB_SYS_ERROR";
case MHI_CB_FATAL_ERROR:
return "MHI_CB_FATAL_ERROR";
case MHI_CB_BW_REQ:
return "MHI_CB_BW_REQ";
default:
return "UNKNOWN";
}
}
static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
enum mhi_callback cb)
{
struct ath11k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "notify status reason %s\n",
ath11k_mhi_op_callback_to_str(cb));
switch (cb) {
case MHI_CB_SYS_ERROR:
ath11k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n");
break;
case MHI_CB_EE_RDDM:
if (!(test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags)))
queue_work(ab->workqueue_aux, &ab->reset_work);
break;
default:
break;
}
}
static int ath11k_mhi_op_read_reg(struct mhi_controller *mhi_cntrl,
void __iomem *addr,
u32 *out)
{
*out = readl(addr);
return 0;
}
static void ath11k_mhi_op_write_reg(struct mhi_controller *mhi_cntrl,
void __iomem *addr,
u32 val)
{
writel(val, addr);
}
static int ath11k_mhi_read_addr_from_dt(struct mhi_controller *mhi_ctrl)
{
struct device_node *np;
struct resource res;
int ret;
np = of_find_node_by_type(NULL, "memory");
if (!np)
return -ENOENT;
ret = of_address_to_resource(np, 0, &res);
of_node_put(np);
if (ret)
return ret;
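/* Carve the MHI iova range out of the first memory node, skipping the
 * first 16 MiB (0x1000000). The reason for reserving those 16 MiB is an
 * assumption here - presumably to keep low firmware-owned addresses out
 * of the MHI address space.
 */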
mhi_ctrl->iova_start = res.start + 0x1000000;
mhi_ctrl->iova_stop = res.end;
return 0;
}
int ath11k_mhi_register(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
struct mhi_controller *mhi_ctrl;
struct mhi_controller_config *ath11k_mhi_config;
int ret;
mhi_ctrl = mhi_alloc_controller();
if (!mhi_ctrl)
return -ENOMEM;
ath11k_core_create_firmware_path(ab, ATH11K_AMSS_FILE,
ab_pci->amss_path,
sizeof(ab_pci->amss_path));
ab_pci->mhi_ctrl = mhi_ctrl;
mhi_ctrl->cntrl_dev = ab->dev;
mhi_ctrl->fw_image = ab_pci->amss_path;
mhi_ctrl->regs = ab->mem;
mhi_ctrl->reg_len = ab->mem_len;
ret = ath11k_mhi_get_msi(ab_pci);
if (ret) {
ath11k_err(ab, "failed to get msi for mhi\n");
goto free_controller;
}
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
if (test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
ret = ath11k_mhi_read_addr_from_dt(mhi_ctrl);
if (ret < 0)
goto free_controller;
} else {
mhi_ctrl->iova_start = 0;
mhi_ctrl->iova_stop = 0xFFFFFFFF;
}
mhi_ctrl->rddm_size = RDDM_DUMP_SIZE;
mhi_ctrl->sbl_size = SZ_512K;
mhi_ctrl->seg_len = SZ_512K;
mhi_ctrl->fbc_download = true;
mhi_ctrl->runtime_get = ath11k_mhi_op_runtime_get;
mhi_ctrl->runtime_put = ath11k_mhi_op_runtime_put;
mhi_ctrl->status_cb = ath11k_mhi_op_status_cb;
mhi_ctrl->read_reg = ath11k_mhi_op_read_reg;
mhi_ctrl->write_reg = ath11k_mhi_op_write_reg;
switch (ab->hw_rev) {
case ATH11K_HW_QCN9074_HW10:
ath11k_mhi_config = &ath11k_mhi_config_qcn9074;
break;
case ATH11K_HW_QCA6390_HW20:
case ATH11K_HW_WCN6855_HW20:
case ATH11K_HW_WCN6855_HW21:
ath11k_mhi_config = &ath11k_mhi_config_qca6390;
break;
default:
ath11k_err(ab, "failed assign mhi_config for unknown hw rev %d\n",
ab->hw_rev);
ret = -EINVAL;
goto free_controller;
}
ret = mhi_register_controller(mhi_ctrl, ath11k_mhi_config);
if (ret) {
ath11k_err(ab, "failed to register to mhi bus, err = %d\n", ret);
goto free_controller;
}
return 0;
free_controller:
mhi_free_controller(mhi_ctrl);
ab_pci->mhi_ctrl = NULL;
return ret;
}
void ath11k_mhi_unregister(struct ath11k_pci *ab_pci)
{
struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
mhi_unregister_controller(mhi_ctrl);
kfree(mhi_ctrl->irq);
mhi_free_controller(mhi_ctrl);
}
int ath11k_mhi_start(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
int ret;
ab_pci->mhi_ctrl->timeout_ms = MHI_TIMEOUT_DEFAULT_MS;
ret = mhi_prepare_for_power_up(ab_pci->mhi_ctrl);
if (ret) {
ath11k_warn(ab, "failed to prepare mhi: %d", ret);
return ret;
}
ret = mhi_sync_power_up(ab_pci->mhi_ctrl);
if (ret) {
ath11k_warn(ab, "failed to power up mhi: %d", ret);
return ret;
}
return 0;
}
void ath11k_mhi_stop(struct ath11k_pci *ab_pci)
{
mhi_power_down(ab_pci->mhi_ctrl, true);
mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
}
int ath11k_mhi_suspend(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
int ret;
ret = mhi_pm_suspend(ab_pci->mhi_ctrl);
if (ret) {
ath11k_warn(ab, "failed to suspend mhi: %d", ret);
return ret;
}
return 0;
}
int ath11k_mhi_resume(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
int ret;
/* Force the MHI resume, as some devices such as QCA6390 and WCN6855
 * are functional even though they are not in the M3 state; ignore
 * the MHI state while resuming.
 */
ret = mhi_pm_resume_force(ab_pci->mhi_ctrl);
if (ret) {
ath11k_warn(ab, "failed to resume mhi: %d", ret);
return ret;
}
return 0;
}
|
linux-master
|
drivers/net/wireless/ath/ath11k/mhi.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
#include "hif.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"
static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
struct sk_buff *skb)
{
dev_kfree_skb_any(skb);
}
void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_peer *peer;
/* TODO: Any other peer specific DP cleanup */
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, vdev_id, addr);
if (!peer) {
ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
addr, vdev_id);
spin_unlock_bh(&ab->base_lock);
return;
}
ath11k_peer_rx_tid_cleanup(ar, peer);
peer->dp_setup_done = false;
crypto_free_shash(peer->tfm_mmic);
spin_unlock_bh(&ab->base_lock);
}
int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_peer *peer;
u32 reo_dest;
int ret = 0, tid;
/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
reo_dest = ar->dp.mac_id + 1;
ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
WMI_PEER_SET_DEFAULT_ROUTING,
DP_RX_HASH_ENABLE | (reo_dest << 1));
if (ret) {
ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
ret, addr, vdev_id);
return ret;
}
for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
HAL_PN_TYPE_NONE);
if (ret) {
ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
tid, ret);
goto peer_clean;
}
}
ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
if (ret) {
ath11k_warn(ab, "failed to setup rx defrag context\n");
tid--;
goto peer_clean;
}
/* TODO: Setup other peer specific resource used in data path */
return 0;
peer_clean:
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, vdev_id, addr);
if (!peer) {
ath11k_warn(ab, "failed to find the peer to del rx tid\n");
spin_unlock_bh(&ab->base_lock);
return -ENOENT;
}
for (; tid >= 0; tid--)
ath11k_peer_rx_tid_delete(ar, peer, tid);
spin_unlock_bh(&ab->base_lock);
return ret;
}
void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
{
if (!ring->vaddr_unaligned)
return;
if (ring->cached)
kfree(ring->vaddr_unaligned);
else
dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
ring->paddr_unaligned);
ring->vaddr_unaligned = NULL;
}
static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
{
int ext_group_num;
u8 mask = 1 << ring_num;
for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
ext_group_num++) {
if (mask & grp_mask[ext_group_num])
return ext_group_num;
}
return -ENOENT;
}
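/* Worked example (illustrative): with grp_mask[] = { 0x1, 0x6 } and
 * ring_num = 2, mask is BIT(2) = 0x4, which matches grp_mask[1], so the
 * ring is serviced by ext IRQ group 1.
 */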
static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
enum hal_ring_type type, int ring_num)
{
const u8 *grp_mask;
switch (type) {
case HAL_WBM2SW_RELEASE:
if (ring_num == DP_RX_RELEASE_RING_NUM) {
grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
ring_num = 0;
} else {
grp_mask = &ab->hw_params.ring_mask->tx[0];
}
break;
case HAL_REO_EXCEPTION:
grp_mask = &ab->hw_params.ring_mask->rx_err[0];
break;
case HAL_REO_DST:
grp_mask = &ab->hw_params.ring_mask->rx[0];
break;
case HAL_REO_STATUS:
grp_mask = &ab->hw_params.ring_mask->reo_status[0];
break;
case HAL_RXDMA_MONITOR_STATUS:
case HAL_RXDMA_MONITOR_DST:
grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
break;
case HAL_RXDMA_DST:
grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
break;
case HAL_RXDMA_BUF:
grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
break;
case HAL_RXDMA_MONITOR_BUF:
case HAL_TCL_DATA:
case HAL_TCL_CMD:
case HAL_REO_CMD:
case HAL_SW2WBM_RELEASE:
case HAL_WBM_IDLE_LINK:
case HAL_TCL_STATUS:
case HAL_REO_REINJECT:
case HAL_CE_SRC:
case HAL_CE_DST:
case HAL_CE_DST_STATUS:
default:
return -ENOENT;
}
return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}
static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
struct hal_srng_params *ring_params,
enum hal_ring_type type, int ring_num)
{
int msi_group_number, msi_data_count;
u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
int ret;
ret = ath11k_get_user_msi_vector(ab, "DP",
&msi_data_count, &msi_data_start,
&msi_irq_start);
if (ret)
return;
msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
ring_num);
if (msi_group_number < 0) {
ath11k_dbg(ab, ATH11K_DBG_PCI,
"ring not part of an ext_group; ring_type: %d,ring_num %d",
type, ring_num);
ring_params->msi_addr = 0;
ring_params->msi_data = 0;
return;
}
if (msi_group_number > msi_data_count) {
ath11k_dbg(ab, ATH11K_DBG_PCI,
"multiple msi_groups share one msi, msi_group_num %d",
msi_group_number);
}
ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
ring_params->msi_addr = addr_lo;
ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
ring_params->msi_data = (msi_group_number % msi_data_count)
+ msi_data_start;
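/* Sketch of the wrap-around above (illustrative numbers): with
 * msi_data_count = 2 and msi_data_start = 1, MSI groups 0..3 map to MSI
 * data values 1, 2, 1, 2 - several ext groups may therefore share one MSI
 * vector, which is what the debug message above points out.
 */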
ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}
int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
enum hal_ring_type type, int ring_num,
int mac_id, int num_entries)
{
struct hal_srng_params params = { 0 };
int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
int ret;
bool cached = false;
if (max_entries < 0 || entry_sz < 0)
return -EINVAL;
if (num_entries > max_entries)
num_entries = max_entries;
ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
if (ab->hw_params.alloc_cacheable_memory) {
/* Allocate the reo dst and tx completion rings from cacheable memory */
switch (type) {
case HAL_REO_DST:
case HAL_WBM2SW_RELEASE:
cached = true;
break;
default:
cached = false;
}
if (cached) {
ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
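/* virt_to_phys() is usable here because kzalloc() returns directly
 * mapped lowmem; cache maintenance for rings flagged HAL_SRNG_FLAGS_CACHED
 * (set below) is expected to be handled explicitly by the HAL layer
 * rather than by the coherent DMA API.
 */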
}
}
if (!cached)
ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
&ring->paddr_unaligned,
GFP_KERNEL);
if (!ring->vaddr_unaligned)
return -ENOMEM;
ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
(unsigned long)ring->vaddr_unaligned);
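/* Alignment sketch (illustrative numbers): if the unaligned buffer starts
 * at physical 0x1004 and HAL_RING_BASE_ALIGN is 8, PTR_ALIGN advances
 * vaddr by 4 bytes and the same delta is added to paddr, so both still
 * describe the same memory. The extra HAL_RING_BASE_ALIGN - 1 bytes added
 * to ring->size above guarantee the aligned ring still holds
 * num_entries * entry_sz bytes.
 */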
params.ring_base_vaddr = ring->vaddr;
params.ring_base_paddr = ring->paddr;
params.num_entries = num_entries;
ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);
switch (type) {
case HAL_REO_DST:
params.intr_batch_cntr_thres_entries =
HAL_SRNG_INT_BATCH_THRESHOLD_RX;
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
break;
case HAL_RXDMA_BUF:
case HAL_RXDMA_MONITOR_BUF:
case HAL_RXDMA_MONITOR_STATUS:
params.low_threshold = num_entries >> 3;
params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
params.intr_batch_cntr_thres_entries = 0;
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
break;
case HAL_WBM2SW_RELEASE:
if (ring_num < 3) {
params.intr_batch_cntr_thres_entries =
HAL_SRNG_INT_BATCH_THRESHOLD_TX;
params.intr_timer_thres_us =
HAL_SRNG_INT_TIMER_THRESHOLD_TX;
break;
}
/* fall through when ring_num >= 3 */
fallthrough;
case HAL_REO_EXCEPTION:
case HAL_REO_REINJECT:
case HAL_REO_CMD:
case HAL_REO_STATUS:
case HAL_TCL_DATA:
case HAL_TCL_CMD:
case HAL_TCL_STATUS:
case HAL_WBM_IDLE_LINK:
case HAL_SW2WBM_RELEASE:
case HAL_RXDMA_DST:
case HAL_RXDMA_MONITOR_DST:
case HAL_RXDMA_MONITOR_DESC:
params.intr_batch_cntr_thres_entries =
HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
break;
case HAL_RXDMA_DIR_BUF:
break;
default:
ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
return -EINVAL;
}
if (cached) {
params.flags |= HAL_SRNG_FLAGS_CACHED;
ring->cached = 1;
}
ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
if (ret < 0) {
ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
ret, ring_num);
return ret;
}
ring->ring_id = ret;
return 0;
}
void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
{
int i;
if (!ab->hw_params.supports_shadow_regs)
return;
for (i = 0; i < ab->hw_params.max_tx_ring; i++)
ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);
ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
}
static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
{
struct ath11k_dp *dp = &ab->dp;
int i;
ath11k_dp_stop_shadow_timers(ab);
ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
}
ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
}
static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
{
struct ath11k_dp *dp = &ab->dp;
struct hal_srng *srng;
int i, ret;
u8 tcl_num, wbm_num;
ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
HAL_SW2WBM_RELEASE, 0, 0,
DP_WBM_RELEASE_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
ret);
goto err;
}
ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
DP_TCL_CMD_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
goto err;
}
ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
0, 0, DP_TCL_STATUS_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
goto err;
}
for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;
ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
HAL_TCL_DATA, tcl_num, 0,
ab->hw_params.tx_ring_size);
if (ret) {
ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
i, ret);
goto err;
}
ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
HAL_WBM2SW_RELEASE, wbm_num, 0,
DP_TX_COMP_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
i, ret);
goto err;
}
srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
ath11k_hal_tx_init_data_ring(ab, srng);
ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
ATH11K_SHADOW_DP_TIMER_INTERVAL,
dp->tx_ring[i].tcl_data_ring.ring_id);
}
ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
0, 0, DP_REO_REINJECT_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
ret);
goto err;
}
ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
goto err;
}
ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
0, 0, DP_REO_EXCEPTION_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
ret);
goto err;
}
ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
0, 0, DP_REO_CMD_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
goto err;
}
srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
ath11k_hal_reo_init_cmd_ring(ab, srng);
ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
dp->reo_cmd_ring.ring_id);
ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
0, 0, DP_REO_STATUS_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
goto err;
}
/* When hash-based routing of rx packets is enabled, 32 entries are
 * configured to map the hash values to the rings.
 */
ab->hw_params.hw_ops->reo_setup(ab);
return 0;
err:
ath11k_dp_srng_common_cleanup(ab);
return ret;
}
static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
{
struct ath11k_dp *dp = &ab->dp;
struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
int i;
for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
if (!slist[i].vaddr)
continue;
dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
slist[i].vaddr, slist[i].paddr);
slist[i].vaddr = NULL;
}
}
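/* When the WBM idle link list is too large for a single allocation,
 * the link descriptors are exposed to HW through a list of
 * DMA-coherent "scatter" buffers instead, each holding a slice of the
 * idle link entries.
 */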
static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
int size,
u32 n_link_desc_bank,
u32 n_link_desc,
u32 last_bank_sz)
{
struct ath11k_dp *dp = &ab->dp;
struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
u32 n_entries_per_buf;
int num_scatter_buf, scatter_idx;
struct hal_wbm_link_desc *scatter_buf;
int align_bytes, n_entries;
dma_addr_t paddr;
int rem_entries;
int i;
int ret = 0;
u32 end_offset;
n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
return -EINVAL;
for (i = 0; i < num_scatter_buf; i++) {
slist[i].vaddr = dma_alloc_coherent(ab->dev,
HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
&slist[i].paddr, GFP_KERNEL);
if (!slist[i].vaddr) {
ret = -ENOMEM;
goto err;
}
}
scatter_idx = 0;
scatter_buf = slist[scatter_idx].vaddr;
rem_entries = n_entries_per_buf;
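/* Walk all link descriptor banks and emit one WBM link descriptor
 * per HAL_LINK_DESC_SIZE chunk, moving on to the next scatter buffer
 * whenever the current one runs out of entries.
 */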
for (i = 0; i < n_link_desc_bank; i++) {
align_bytes = link_desc_banks[i].vaddr -
link_desc_banks[i].vaddr_unaligned;
n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
HAL_LINK_DESC_SIZE;
paddr = link_desc_banks[i].paddr;
while (n_entries) {
ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
n_entries--;
paddr += HAL_LINK_DESC_SIZE;
if (rem_entries) {
rem_entries--;
scatter_buf++;
continue;
}
rem_entries = n_entries_per_buf;
scatter_idx++;
scatter_buf = slist[scatter_idx].vaddr;
}
}
end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
sizeof(struct hal_wbm_link_desc);
ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
n_link_desc, end_offset);
return 0;
err:
ath11k_dp_scatter_idle_link_desc_cleanup(ab);
return ret;
}
static void
ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
struct dp_link_desc_bank *link_desc_banks)
{
int i;
for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
if (link_desc_banks[i].vaddr_unaligned) {
dma_free_coherent(ab->dev,
link_desc_banks[i].size,
link_desc_banks[i].vaddr_unaligned,
link_desc_banks[i].paddr_unaligned);
link_desc_banks[i].vaddr_unaligned = NULL;
}
}
}
static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
struct dp_link_desc_bank *desc_bank,
int n_link_desc_bank,
int last_bank_sz)
{
struct ath11k_dp *dp = &ab->dp;
int i;
int ret = 0;
int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
for (i = 0; i < n_link_desc_bank; i++) {
if (i == (n_link_desc_bank - 1) && last_bank_sz)
desc_sz = last_bank_sz;
desc_bank[i].vaddr_unaligned =
dma_alloc_coherent(ab->dev, desc_sz,
&desc_bank[i].paddr_unaligned,
GFP_KERNEL);
if (!desc_bank[i].vaddr_unaligned) {
ret = -ENOMEM;
goto err;
}
desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
HAL_LINK_DESC_ALIGN);
desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
((unsigned long)desc_bank[i].vaddr -
(unsigned long)desc_bank[i].vaddr_unaligned);
desc_bank[i].size = desc_sz;
}
return 0;
err:
ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);
return ret;
}
void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
struct dp_link_desc_bank *desc_bank,
u32 ring_type, struct dp_srng *ring)
{
ath11k_dp_link_desc_bank_free(ab, desc_bank);
if (ring_type != HAL_RXDMA_MONITOR_DESC) {
ath11k_dp_srng_cleanup(ab, ring);
ath11k_dp_scatter_idle_link_desc_cleanup(ab);
}
}
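/* Size the WBM idle link ring from the worst-case MPDU/MSDU link and
 * queue descriptor counts, round the total up to the next power of
 * two, and set up the ring itself.
 */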
static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
{
struct ath11k_dp *dp = &ab->dp;
u32 n_mpdu_link_desc, n_mpdu_queue_desc;
u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
int ret = 0;
n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
HAL_NUM_MPDUS_PER_LINK_DESC;
n_mpdu_queue_desc = n_mpdu_link_desc /
HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
DP_AVG_MSDUS_PER_FLOW) /
HAL_NUM_TX_MSDUS_PER_LINK_DESC;
n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
DP_AVG_MSDUS_PER_MPDU) /
HAL_NUM_RX_MSDUS_PER_LINK_DESC;
*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
n_tx_msdu_link_desc + n_rx_msdu_link_desc;
if (*n_link_desc & (*n_link_desc - 1))
*n_link_desc = 1 << fls(*n_link_desc);
ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
if (ret) {
ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
return ret;
}
return ret;
}
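/* Carve the idle link descriptors into DMA-coherent banks and hand
 * them to HW: either through the scatter list (when the total size
 * exceeds DP_LINK_DESC_ALLOC_SIZE_THRESH) or by writing one entry per
 * descriptor directly into the WBM idle link ring.
 */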
int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
struct dp_link_desc_bank *link_desc_banks,
u32 ring_type, struct hal_srng *srng,
u32 n_link_desc)
{
u32 tot_mem_sz;
u32 n_link_desc_bank, last_bank_sz;
u32 entry_sz, align_bytes, n_entries;
u32 paddr;
u32 *desc;
int i, ret;
tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
tot_mem_sz += HAL_LINK_DESC_ALIGN;
if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
n_link_desc_bank = 1;
last_bank_sz = tot_mem_sz;
} else {
n_link_desc_bank = tot_mem_sz /
(DP_LINK_DESC_ALLOC_SIZE_THRESH -
HAL_LINK_DESC_ALIGN);
last_bank_sz = tot_mem_sz %
(DP_LINK_DESC_ALLOC_SIZE_THRESH -
HAL_LINK_DESC_ALIGN);
if (last_bank_sz)
n_link_desc_bank += 1;
}
if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
return -EINVAL;
ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
n_link_desc_bank, last_bank_sz);
if (ret)
return ret;
/* Setup link desc idle list for HW internal usage */
entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
tot_mem_sz = entry_sz * n_link_desc;
/* Setup scatter desc list when the total memory requirement is more */
if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
ring_type != HAL_RXDMA_MONITOR_DESC) {
ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
n_link_desc_bank,
n_link_desc,
last_bank_sz);
if (ret) {
ath11k_warn(ab, "failed to setup scatting idle list descriptor :%d\n",
ret);
goto fail_desc_bank_free;
}
return 0;
}
spin_lock_bh(&srng->lock);
ath11k_hal_srng_access_begin(ab, srng);
for (i = 0; i < n_link_desc_bank; i++) {
align_bytes = link_desc_banks[i].vaddr -
link_desc_banks[i].vaddr_unaligned;
n_entries = (link_desc_banks[i].size - align_bytes) /
HAL_LINK_DESC_SIZE;
paddr = link_desc_banks[i].paddr;
while (n_entries &&
(desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
i, paddr);
n_entries--;
paddr += HAL_LINK_DESC_SIZE;
}
}
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return 0;
fail_desc_bank_free:
ath11k_dp_link_desc_bank_free(ab, link_desc_banks);
return ret;
}
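/* NAPI-style servicing of every DP ring mapped to this ext IRQ group:
 * TX completions first, then the RX error/WBM/data/monitor rings while
 * budget remains, then REO status and RXDMA housekeeping. Returns the
 * total work done for the caller's NAPI accounting.
 */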
int ath11k_dp_service_srng(struct ath11k_base *ab,
struct ath11k_ext_irq_grp *irq_grp,
int budget)
{
struct napi_struct *napi = &irq_grp->napi;
const struct ath11k_hw_hal_params *hal_params;
int grp_id = irq_grp->grp_id;
int work_done = 0;
int i, j;
int tot_work_done = 0;
for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
ab->hw_params.ring_mask->tx[grp_id])
ath11k_dp_tx_completion_handler(ab, i);
}
if (ab->hw_params.ring_mask->rx_err[grp_id]) {
work_done = ath11k_dp_process_rx_err(ab, napi, budget);
budget -= work_done;
tot_work_done += work_done;
if (budget <= 0)
goto done;
}
if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
work_done = ath11k_dp_rx_process_wbm_err(ab,
napi,
budget);
budget -= work_done;
tot_work_done += work_done;
if (budget <= 0)
goto done;
}
if (ab->hw_params.ring_mask->rx[grp_id]) {
i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
work_done = ath11k_dp_process_rx(ab, i, napi,
budget);
budget -= work_done;
tot_work_done += work_done;
if (budget <= 0)
goto done;
}
if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
for (i = 0; i < ab->num_radios; i++) {
for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
int id = i * ab->hw_params.num_rxmda_per_pdev + j;
if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
BIT(id)) {
work_done =
ath11k_dp_rx_process_mon_rings(ab,
id,
napi, budget);
budget -= work_done;
tot_work_done += work_done;
if (budget <= 0)
goto done;
}
}
}
}
if (ab->hw_params.ring_mask->reo_status[grp_id])
ath11k_dp_process_reo_status(ab);
for (i = 0; i < ab->num_radios; i++) {
for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
int id = i * ab->hw_params.num_rxmda_per_pdev + j;
if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
budget -= work_done;
tot_work_done += work_done;
}
if (budget <= 0)
goto done;
if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
struct ath11k *ar = ath11k_ab_to_ar(ab, id);
struct ath11k_pdev_dp *dp = &ar->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
hal_params = ab->hw_params.hal_params;
ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
hal_params->rx_buf_rbm);
}
}
}
/* TODO: Implement handler for other interrupts */
done:
return tot_work_done;
}
EXPORT_SYMBOL(ath11k_dp_service_srng);
void ath11k_dp_pdev_free(struct ath11k_base *ab)
{
struct ath11k *ar;
int i;
del_timer_sync(&ab->mon_reap_timer);
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
ath11k_dp_rx_pdev_free(ab, i);
ath11k_debugfs_unregister(ar);
ath11k_dp_rx_pdev_mon_detach(ar);
}
}
void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
{
struct ath11k *ar;
struct ath11k_pdev_dp *dp;
int i;
int j;
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
dp = &ar->dp;
dp->mac_id = i;
idr_init(&dp->rx_refill_buf_ring.bufs_idr);
spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
atomic_set(&dp->num_tx_pending, 0);
init_waitqueue_head(&dp->tx_empty_waitq);
for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
}
idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
}
}
int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
{
struct ath11k *ar;
int ret;
int i;
/* TODO: Per-pdev rx ring, unlike tx ring which is mapped to different ACs */
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
ret = ath11k_dp_rx_pdev_alloc(ab, i);
if (ret) {
ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
i);
goto err;
}
ret = ath11k_dp_rx_pdev_mon_attach(ar);
if (ret) {
ath11k_warn(ab, "failed to initialize mon pdev %d\n",
i);
goto err;
}
}
return 0;
err:
ath11k_dp_pdev_free(ab);
return ret;
}
int ath11k_dp_htt_connect(struct ath11k_dp *dp)
{
struct ath11k_htc_svc_conn_req conn_req;
struct ath11k_htc_svc_conn_resp conn_resp;
int status;
memset(&conn_req, 0, sizeof(conn_req));
memset(&conn_resp, 0, sizeof(conn_resp));
conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;
/* connect to control service */
conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;
status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
&conn_resp);
if (status)
return status;
dp->eid = conn_resp.eid;
return 0;
}
static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
/* When v2_map_support is true: for STA mode, enable the address
 * search index; TCL uses the ast_hash value in the descriptor.
 * When v2_map_support is false: for STA mode, don't enable the
 * address search index.
 */
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_STA:
if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
} else {
arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
}
break;
case WMI_VDEV_TYPE_AP:
case WMI_VDEV_TYPE_IBSS:
arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
break;
case WMI_VDEV_TYPE_MONITOR:
default:
return;
}
}
void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
{
arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
arvif->vdev_id) |
FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
ar->pdev->pdev_id);
/* set HTT extension valid bit to 0 by default */
arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
ath11k_dp_update_vdev_search(arvif);
}
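/* idr_for_each() callback: DMA-unmap and free any MSDU still pending
 * in a TX ring at teardown time.
 */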
static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
{
struct ath11k_base *ab = (struct ath11k_base *)ctx;
struct sk_buff *msdu = skb;
dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
DMA_TO_DEVICE);
dev_kfree_skb_any(msdu);
return 0;
}
void ath11k_dp_free(struct ath11k_base *ab)
{
struct ath11k_dp *dp = &ab->dp;
int i;
ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
ath11k_dp_srng_common_cleanup(ab);
ath11k_dp_reo_cmd_list_cleanup(ab);
for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
idr_for_each(&dp->tx_ring[i].txbuf_idr,
ath11k_dp_tx_pending_cleanup, ab);
idr_destroy(&dp->tx_ring[i].txbuf_idr);
spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
kfree(dp->tx_ring[i].tx_status);
}
/* Deinit any SOC level resource */
}
int ath11k_dp_alloc(struct ath11k_base *ab)
{
struct ath11k_dp *dp = &ab->dp;
struct hal_srng *srng = NULL;
size_t size = 0;
u32 n_link_desc = 0;
int ret;
int i;
dp->ab = ab;
INIT_LIST_HEAD(&dp->reo_cmd_list);
INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
spin_lock_init(&dp->reo_cmd_lock);
dp->reo_cmd_cache_flush_count = 0;
ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
if (ret) {
ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
return ret;
}
srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
HAL_WBM_IDLE_LINK, srng, n_link_desc);
if (ret) {
ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
return ret;
}
ret = ath11k_dp_srng_common_setup(ab);
if (ret)
goto fail_link_desc_cleanup;
size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
idr_init(&dp->tx_ring[i].txbuf_idr);
spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
dp->tx_ring[i].tcl_data_ring_id = i;
dp->tx_ring[i].tx_status_head = 0;
dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
if (!dp->tx_ring[i].tx_status) {
ret = -ENOMEM;
goto fail_cmn_srng_cleanup;
}
}
for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
ath11k_hal_tx_set_dscp_tid_map(ab, i);
/* Init any SOC level resource for DP */
return 0;
fail_cmn_srng_cleanup:
ath11k_dp_srng_common_cleanup(ab);
fail_link_desc_cleanup:
ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
return ret;
}
static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
{
struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
t, timer);
struct ath11k_base *ab = update_timer->ab;
struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];
spin_lock_bh(&srng->lock);
/* When the timer fires, the handler checks whether new TX has
 * happened. It updates the HP only when there were no TX operations
 * during the timeout interval, and then stops the timer. The timer
 * is started again when TX happens again.
 */
if (update_timer->timer_tx_num != update_timer->tx_num) {
update_timer->timer_tx_num = update_timer->tx_num;
mod_timer(&update_timer->timer, jiffies +
msecs_to_jiffies(update_timer->interval));
} else {
update_timer->started = false;
ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
}
spin_unlock_bh(&srng->lock);
}
void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
struct hal_srng *srng,
struct ath11k_hp_update_timer *update_timer)
{
lockdep_assert_held(&srng->lock);
if (!ab->hw_params.supports_shadow_regs)
return;
update_timer->tx_num++;
if (update_timer->started)
return;
update_timer->started = true;
update_timer->timer_tx_num = update_timer->tx_num;
mod_timer(&update_timer->timer, jiffies +
msecs_to_jiffies(update_timer->interval));
}
void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
struct ath11k_hp_update_timer *update_timer)
{
if (!ab->hw_params.supports_shadow_regs)
return;
if (!update_timer->init)
return;
del_timer_sync(&update_timer->timer);
}
void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
struct ath11k_hp_update_timer *update_timer,
u32 interval, u32 ring_id)
{
if (!ab->hw_params.supports_shadow_regs)
return;
update_timer->tx_num = 0;
update_timer->timer_tx_num = 0;
update_timer->ab = ab;
update_timer->ring_id = ring_id;
update_timer->interval = interval;
update_timer->init = true;
timer_setup(&update_timer->timer,
ath11k_dp_shadow_timer_handler, 0);
}
|
linux-master
|
drivers/net/wireless/ath/ath11k/dp.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2013 Qualcomm Atheros, Inc.
*/
#include <linux/module.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
|
linux-master
|
drivers/net/wireless/ath/wil6210/trace.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
*/
#include <linux/etherdevice.h>
#include <linux/moduleparam.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include "wil6210.h"
#include "txrx_edma.h"
#include "txrx.h"
#include "trace.h"
/* Max number of entries (packets to complete) to update the hwtail of
 * the tx status ring. Should be a power of 2.
 */
#define WIL_EDMA_TX_SRING_UPDATE_HW_TAIL 128
#define WIL_EDMA_MAX_DATA_OFFSET (2)
/* RX buffer size must be aligned to 4 bytes */
#define WIL_EDMA_RX_BUF_LEN_DEFAULT (2048)
#define MAX_INVALID_BUFF_ID_RETRY (3)
static void wil_tx_desc_unmap_edma(struct device *dev,
union wil_tx_desc *desc,
struct wil_ctx *ctx)
{
struct wil_tx_enhanced_desc *d = (struct wil_tx_enhanced_desc *)desc;
dma_addr_t pa = wil_tx_desc_get_addr_edma(&d->dma);
u16 dmalen = le16_to_cpu(d->dma.length);
switch (ctx->mapped_as) {
case wil_mapped_as_single:
dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
break;
case wil_mapped_as_page:
dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
break;
default:
break;
}
}
static int wil_find_free_sring(struct wil6210_priv *wil)
{
int i;
for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++) {
if (!wil->srings[i].va)
return i;
}
return -EINVAL;
}
static void wil_sring_free(struct wil6210_priv *wil,
struct wil_status_ring *sring)
{
struct device *dev = wil_to_dev(wil);
size_t sz;
if (!sring || !sring->va)
return;
sz = sring->elem_size * sring->size;
wil_dbg_misc(wil, "status_ring_free, size(bytes)=%zu, 0x%p:%pad\n",
sz, sring->va, &sring->pa);
dma_free_coherent(dev, sz, (void *)sring->va, sring->pa);
sring->pa = 0;
sring->va = NULL;
}
static int wil_sring_alloc(struct wil6210_priv *wil,
struct wil_status_ring *sring)
{
struct device *dev = wil_to_dev(wil);
size_t sz = sring->elem_size * sring->size;
wil_dbg_misc(wil, "status_ring_alloc: size=%zu\n", sz);
if (sz == 0) {
wil_err(wil, "Cannot allocate a zero size status ring\n");
return -EINVAL;
}
sring->swhead = 0;
/* Status messages are allocated and initialized to 0. This is
 * necessary since the DR bit must be initialized to 0.
 */
sring->va = dma_alloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
if (!sring->va)
return -ENOMEM;
wil_dbg_misc(wil, "status_ring[%d] 0x%p:%pad\n", sring->size, sring->va,
&sring->pa);
return 0;
}
static int wil_tx_init_edma(struct wil6210_priv *wil)
{
int ring_id = wil_find_free_sring(wil);
struct wil_status_ring *sring;
int rc;
u16 status_ring_size;
if (wil->tx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
wil->tx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;
status_ring_size = 1 << wil->tx_status_ring_order;
wil_dbg_misc(wil, "init TX sring: size=%u, ring_id=%u\n",
status_ring_size, ring_id);
if (ring_id < 0)
return ring_id;
/* Allocate Tx status ring. Tx descriptor rings will be
* allocated on WMI connect event
*/
sring = &wil->srings[ring_id];
sring->is_rx = false;
sring->size = status_ring_size;
sring->elem_size = sizeof(struct wil_ring_tx_status);
rc = wil_sring_alloc(wil, sring);
if (rc)
return rc;
rc = wil_wmi_tx_sring_cfg(wil, ring_id);
if (rc)
goto out_free;
sring->desc_rdy_pol = 1;
wil->tx_sring_idx = ring_id;
return 0;
out_free:
wil_sring_free(wil, sring);
return rc;
}
/* Allocate one skb for Rx descriptor RING */
static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil,
struct wil_ring *ring, u32 i)
{
struct device *dev = wil_to_dev(wil);
unsigned int sz = wil->rx_buf_len;
dma_addr_t pa;
u16 buff_id;
struct list_head *active = &wil->rx_buff_mgmt.active;
struct list_head *free = &wil->rx_buff_mgmt.free;
struct wil_rx_buff *rx_buff;
struct wil_rx_buff *buff_arr = wil->rx_buff_mgmt.buff_arr;
struct sk_buff *skb;
struct wil_rx_enhanced_desc dd, *d = &dd;
struct wil_rx_enhanced_desc *_d = (struct wil_rx_enhanced_desc *)
&ring->va[i].rx.enhanced;
if (unlikely(list_empty(free))) {
wil->rx_buff_mgmt.free_list_empty_cnt++;
return -EAGAIN;
}
skb = dev_alloc_skb(sz);
if (unlikely(!skb))
return -ENOMEM;
skb_put(skb, sz);
/* Make sure that the network stack calculates the checksum for
 * packets which failed the HW checksum calculation
 */
skb->ip_summed = CHECKSUM_NONE;
pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, pa))) {
kfree_skb(skb);
return -ENOMEM;
}
/* Get the buffer ID - the index of the rx buffer in the buff_arr */
rx_buff = list_first_entry(free, struct wil_rx_buff, list);
buff_id = rx_buff->id;
/* Move a buffer from the free list to the active list */
list_move(&rx_buff->list, active);
buff_arr[buff_id].skb = skb;
wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);
d->dma.length = cpu_to_le16(sz);
d->mac.buff_id = cpu_to_le16(buff_id);
*_d = *d;
/* Save the physical address in skb->cb for later use in dma_unmap */
memcpy(skb->cb, &pa, sizeof(pa));
return 0;
}
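/* Peek at the status message at swhead. The DR (descriptor ready) bit
 * is read first; callers compare it against desc_rdy_pol to decide
 * whether the entry is valid for the current pass around the ring.
 */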
static inline
void wil_get_next_rx_status_msg(struct wil_status_ring *sring, u8 *dr_bit,
void *msg)
{
struct wil_rx_status_compressed *_msg;
_msg = (struct wil_rx_status_compressed *)
(sring->va + (sring->elem_size * sring->swhead));
*dr_bit = WIL_GET_BITS(_msg->d0, 31, 31);
/* make sure dr_bit is read before the rest of status msg */
rmb();
memcpy(msg, (void *)_msg, sring->elem_size);
}
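/* Advance the status ring SW head; on wrap-around the expected DR
 * polarity is toggled so that entries left over from the previous
 * pass are not mistaken for new ones.
 */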
static inline void wil_sring_advance_swhead(struct wil_status_ring *sring)
{
sring->swhead = (sring->swhead + 1) % sring->size;
if (sring->swhead == 0)
sring->desc_rdy_pol = 1 - sring->desc_rdy_pol;
}
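/* Refill the RX descriptor ring up to the HW-reported swtail with
 * freshly mapped skbs, then publish the new head to HW.
 */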
static int wil_rx_refill_edma(struct wil6210_priv *wil)
{
struct wil_ring *ring = &wil->ring_rx;
u32 next_head;
int rc = 0;
ring->swtail = *ring->edma_rx_swtail.va;
for (; next_head = wil_ring_next_head(ring),
(next_head != ring->swtail);
ring->swhead = next_head) {
rc = wil_ring_alloc_skb_edma(wil, ring, ring->swhead);
if (unlikely(rc)) {
if (rc == -EAGAIN)
wil_dbg_txrx(wil, "No free buffer ID found\n");
else
wil_err_ratelimited(wil,
"Error %d in refill desc[%d]\n",
rc, ring->swhead);
break;
}
}
/* make sure all writes to descriptors (shared memory) are done before
* committing them to HW
*/
wmb();
wil_w(wil, ring->hwtail, ring->swhead);
return rc;
}
static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
struct wil_ring *ring)
{
struct device *dev = wil_to_dev(wil);
struct list_head *active = &wil->rx_buff_mgmt.active;
dma_addr_t pa;
if (!wil->rx_buff_mgmt.buff_arr)
return;
while (!list_empty(active)) {
struct wil_rx_buff *rx_buff =
list_first_entry(active, struct wil_rx_buff, list);
struct sk_buff *skb = rx_buff->skb;
if (unlikely(!skb)) {
wil_err(wil, "No Rx skb at buff_id %d\n", rx_buff->id);
} else {
rx_buff->skb = NULL;
memcpy(&pa, skb->cb, sizeof(pa));
dma_unmap_single(dev, pa, wil->rx_buf_len,
DMA_FROM_DEVICE);
kfree_skb(skb);
}
/* Move the buffer from the active to the free list */
list_move(&rx_buff->list, &wil->rx_buff_mgmt.free);
}
}
static void wil_free_rx_buff_arr(struct wil6210_priv *wil)
{
struct wil_ring *ring = &wil->ring_rx;
if (!wil->rx_buff_mgmt.buff_arr)
return;
/* Move all the buffers to the free list in case active list is
* not empty in order to release all SKBs before deleting the array
*/
wil_move_all_rx_buff_to_free_list(wil, ring);
kfree(wil->rx_buff_mgmt.buff_arr);
wil->rx_buff_mgmt.buff_arr = NULL;
}
static int wil_init_rx_buff_arr(struct wil6210_priv *wil,
size_t size)
{
struct wil_rx_buff *buff_arr;
struct list_head *active = &wil->rx_buff_mgmt.active;
struct list_head *free = &wil->rx_buff_mgmt.free;
int i;
wil->rx_buff_mgmt.buff_arr = kcalloc(size + 1,
sizeof(struct wil_rx_buff),
GFP_KERNEL);
if (!wil->rx_buff_mgmt.buff_arr)
return -ENOMEM;
/* Set list heads */
INIT_LIST_HEAD(active);
INIT_LIST_HEAD(free);
/* Link the buffers into the free list.
 * Buffer id 0 is never used (it marks an invalid id).
 */
buff_arr = wil->rx_buff_mgmt.buff_arr;
for (i = 1; i <= size; i++) {
list_add(&buff_arr[i].list, free);
buff_arr[i].id = i;
}
wil->rx_buff_mgmt.size = size + 1;
return 0;
}
static int wil_init_rx_sring(struct wil6210_priv *wil,
u16 status_ring_size,
size_t elem_size,
u16 ring_id)
{
struct wil_status_ring *sring = &wil->srings[ring_id];
int rc;
wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n",
status_ring_size, ring_id);
memset(&sring->rx_data, 0, sizeof(sring->rx_data));
sring->is_rx = true;
sring->size = status_ring_size;
sring->elem_size = elem_size;
rc = wil_sring_alloc(wil, sring);
if (rc)
return rc;
rc = wil_wmi_rx_sring_add(wil, ring_id);
if (rc)
goto out_free;
sring->desc_rdy_pol = 1;
return 0;
out_free:
wil_sring_free(wil, sring);
return rc;
}
static int wil_ring_alloc_desc_ring(struct wil6210_priv *wil,
struct wil_ring *ring)
{
struct device *dev = wil_to_dev(wil);
size_t sz = ring->size * sizeof(ring->va[0]);
wil_dbg_misc(wil, "alloc_desc_ring:\n");
BUILD_BUG_ON(sizeof(ring->va[0]) != 32);
ring->swhead = 0;
ring->swtail = 0;
ring->ctx = kcalloc(ring->size, sizeof(ring->ctx[0]), GFP_KERNEL);
if (!ring->ctx)
goto err;
ring->va = dma_alloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
if (!ring->va)
goto err_free_ctx;
if (ring->is_rx) {
sz = sizeof(*ring->edma_rx_swtail.va);
ring->edma_rx_swtail.va =
dma_alloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
GFP_KERNEL);
if (!ring->edma_rx_swtail.va)
goto err_free_va;
}
wil_dbg_misc(wil, "%s ring[%d] 0x%p:%pad 0x%p\n",
ring->is_rx ? "RX" : "TX",
ring->size, ring->va, &ring->pa, ring->ctx);
return 0;
err_free_va:
dma_free_coherent(dev, ring->size * sizeof(ring->va[0]),
(void *)ring->va, ring->pa);
ring->va = NULL;
err_free_ctx:
kfree(ring->ctx);
ring->ctx = NULL;
err:
return -ENOMEM;
}
static void wil_ring_free_edma(struct wil6210_priv *wil, struct wil_ring *ring)
{
struct device *dev = wil_to_dev(wil);
size_t sz;
int ring_index = 0;
if (!ring->va)
return;
sz = ring->size * sizeof(ring->va[0]);
lockdep_assert_held(&wil->mutex);
if (ring->is_rx) {
wil_dbg_misc(wil, "free Rx ring [%d] 0x%p:%pad 0x%p\n",
ring->size, ring->va,
&ring->pa, ring->ctx);
wil_move_all_rx_buff_to_free_list(wil, ring);
dma_free_coherent(dev, sizeof(*ring->edma_rx_swtail.va),
ring->edma_rx_swtail.va,
ring->edma_rx_swtail.pa);
goto out;
}
/* TX ring */
ring_index = ring - wil->ring_tx;
wil_dbg_misc(wil, "free Tx ring %d [%d] 0x%p:%pad 0x%p\n",
ring_index, ring->size, ring->va,
&ring->pa, ring->ctx);
while (!wil_ring_is_empty(ring)) {
struct wil_ctx *ctx;
struct wil_tx_enhanced_desc dd, *d = &dd;
struct wil_tx_enhanced_desc *_d =
(struct wil_tx_enhanced_desc *)
&ring->va[ring->swtail].tx.enhanced;
ctx = &ring->ctx[ring->swtail];
if (!ctx) {
wil_dbg_txrx(wil,
"ctx(%d) was already completed\n",
ring->swtail);
ring->swtail = wil_ring_next_tail(ring);
continue;
}
*d = *_d;
wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
if (ctx->skb)
dev_kfree_skb_any(ctx->skb);
ring->swtail = wil_ring_next_tail(ring);
}
out:
dma_free_coherent(dev, sz, (void *)ring->va, ring->pa);
kfree(ring->ctx);
ring->pa = 0;
ring->va = NULL;
ring->ctx = NULL;
}
static int wil_init_rx_desc_ring(struct wil6210_priv *wil, u16 desc_ring_size,
int status_ring_id)
{
struct wil_ring *ring = &wil->ring_rx;
int rc;
wil_dbg_misc(wil, "init RX desc ring\n");
ring->size = desc_ring_size;
ring->is_rx = true;
rc = wil_ring_alloc_desc_ring(wil, ring);
if (rc)
return rc;
rc = wil_wmi_rx_desc_ring_add(wil, status_ring_id);
if (rc)
goto out_free;
return 0;
out_free:
wil_ring_free_edma(wil, ring);
return rc;
}
static void wil_get_reorder_params_edma(struct wil6210_priv *wil,
struct sk_buff *skb, int *tid,
int *cid, int *mid, u16 *seq,
int *mcast, int *retry)
{
struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);
*tid = wil_rx_status_get_tid(s);
*cid = wil_rx_status_get_cid(s);
*mid = wil_rx_status_get_mid(s);
*seq = le16_to_cpu(wil_rx_status_get_seq(wil, s));
*mcast = wil_rx_status_get_mcast(s);
*retry = wil_rx_status_get_retry(s);
}
static void wil_get_netif_rx_params_edma(struct sk_buff *skb, int *cid,
int *security)
{
struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);
*cid = wil_rx_status_get_cid(s);
*security = wil_rx_status_get_security(s);
}
static int wil_rx_crypto_check_edma(struct wil6210_priv *wil,
struct sk_buff *skb)
{
struct wil_rx_status_extended *st;
int cid, tid, key_id, mc;
struct wil_sta_info *s;
struct wil_tid_crypto_rx *c;
struct wil_tid_crypto_rx_single *cc;
const u8 *pn;
/* In HW reorder, HW is responsible for crypto check */
if (wil->use_rx_hw_reordering)
return 0;
st = wil_skb_rxstatus(skb);
cid = wil_rx_status_get_cid(st);
tid = wil_rx_status_get_tid(st);
key_id = wil_rx_status_get_key_id(st);
mc = wil_rx_status_get_mcast(st);
s = &wil->sta[cid];
c = mc ? &s->group_crypto_rx : &s->tid_crypto_rx[tid];
cc = &c->key_id[key_id];
pn = (u8 *)&st->ext.pn;
if (!cc->key_set) {
wil_err_ratelimited(wil,
"Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
cid, tid, mc, key_id);
return -EINVAL;
}
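/* The PN must strictly increase; reverse_memcmp() compares the 6-byte
 * PN from its last byte down, i.e. it treats the PN as stored
 * least-significant byte first.
 */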
if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
wil_err_ratelimited(wil,
"Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
cid, tid, mc, key_id, pn, cc->pn);
return -EINVAL;
}
memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);
return 0;
}
static bool wil_is_rx_idle_edma(struct wil6210_priv *wil)
{
struct wil_status_ring *sring;
struct wil_rx_status_extended msg1;
void *msg = &msg1;
u8 dr_bit;
int i;
for (i = 0; i < wil->num_rx_status_rings; i++) {
sring = &wil->srings[i];
if (!sring->va)
continue;
wil_get_next_rx_status_msg(sring, &dr_bit, msg);
/* Check if there are unhandled RX status messages */
if (dr_bit == sring->desc_rdy_pol)
return false;
}
return true;
}
static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil)
{
/* RX buffer size must be aligned to 4 bytes */
wil->rx_buf_len = rx_large_buf ?
WIL_MAX_ETH_MTU : WIL_EDMA_RX_BUF_LEN_DEFAULT;
}
static int wil_rx_init_edma(struct wil6210_priv *wil, uint desc_ring_order)
{
u16 status_ring_size, desc_ring_size = 1 << desc_ring_order;
struct wil_ring *ring = &wil->ring_rx;
int rc;
size_t elem_size = wil->use_compressed_rx_status ?
sizeof(struct wil_rx_status_compressed) :
sizeof(struct wil_rx_status_extended);
int i;
/* In SW reorder one must use extended status messages */
if (wil->use_compressed_rx_status && !wil->use_rx_hw_reordering) {
wil_err(wil,
"compressed RX status cannot be used with SW reorder\n");
return -EINVAL;
}
if (wil->rx_status_ring_order <= desc_ring_order)
/* make sure sring is larger than desc ring */
wil->rx_status_ring_order = desc_ring_order + 1;
if (wil->rx_buff_id_count <= desc_ring_size)
/* make sure we will not run out of buff_ids */
wil->rx_buff_id_count = desc_ring_size + 512;
if (wil->rx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
wil->rx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;
status_ring_size = 1 << wil->rx_status_ring_order;
wil_dbg_misc(wil,
"rx_init, desc_ring_size=%u, status_ring_size=%u, elem_size=%zu\n",
desc_ring_size, status_ring_size, elem_size);
wil_rx_buf_len_init_edma(wil);
/* Use debugfs dbg_num_rx_srings if set, reserve one sring for TX */
if (wil->num_rx_status_rings > WIL6210_MAX_STATUS_RINGS - 1)
wil->num_rx_status_rings = WIL6210_MAX_STATUS_RINGS - 1;
wil_dbg_misc(wil, "rx_init: allocate %d status rings\n",
wil->num_rx_status_rings);
rc = wil_wmi_cfg_def_rx_offload(wil, wil->rx_buf_len);
if (rc)
return rc;
/* Allocate status ring */
for (i = 0; i < wil->num_rx_status_rings; i++) {
int sring_id = wil_find_free_sring(wil);
if (sring_id < 0) {
rc = -EFAULT;
goto err_free_status;
}
rc = wil_init_rx_sring(wil, status_ring_size, elem_size,
sring_id);
if (rc)
goto err_free_status;
}
/* Allocate descriptor ring */
rc = wil_init_rx_desc_ring(wil, desc_ring_size,
WIL_DEFAULT_RX_STATUS_RING_ID);
if (rc)
goto err_free_status;
if (wil->rx_buff_id_count >= status_ring_size) {
wil_info(wil,
"rx_buff_id_count %d exceeds sring_size %d. set it to %d\n",
wil->rx_buff_id_count, status_ring_size,
status_ring_size - 1);
wil->rx_buff_id_count = status_ring_size - 1;
}
/* Allocate Rx buffer array */
rc = wil_init_rx_buff_arr(wil, wil->rx_buff_id_count);
if (rc)
goto err_free_desc;
/* Fill descriptor ring with credits */
rc = wil_rx_refill_edma(wil);
if (rc)
goto err_free_rx_buff_arr;
return 0;
err_free_rx_buff_arr:
wil_free_rx_buff_arr(wil);
err_free_desc:
wil_ring_free_edma(wil, ring);
err_free_status:
for (i = 0; i < wil->num_rx_status_rings; i++)
wil_sring_free(wil, &wil->srings[i]);
return rc;
}
static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id,
int size, int cid, int tid)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
struct wil_ring *ring = &wil->ring_tx[ring_id];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
lockdep_assert_held(&wil->mutex);
wil_dbg_misc(wil,
"init TX ring: ring_id=%u, cid=%u, tid=%u, sring_id=%u\n",
ring_id, cid, tid, wil->tx_sring_idx);
wil_tx_data_init(txdata);
ring->size = size;
rc = wil_ring_alloc_desc_ring(wil, ring);
if (rc)
goto out;
wil->ring2cid_tid[ring_id][0] = cid;
wil->ring2cid_tid[ring_id][1] = tid;
if (!vif->privacy)
txdata->dot1x_open = true;
rc = wil_wmi_tx_desc_ring_add(vif, ring_id, cid, tid);
if (rc) {
wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed\n");
goto out_free;
}
if (txdata->dot1x_open && agg_wsize >= 0)
wil_addba_tx_request(wil, ring_id, agg_wsize);
return 0;
out_free:
spin_lock_bh(&txdata->lock);
txdata->dot1x_open = false;
txdata->enabled = 0;
spin_unlock_bh(&txdata->lock);
wil_ring_free_edma(wil, ring);
wil->ring2cid_tid[ring_id][0] = wil->max_assoc_sta;
wil->ring2cid_tid[ring_id][1] = 0;
out:
return rc;
}
static int wil_tx_ring_modify_edma(struct wil6210_vif *vif, int ring_id,
int cid, int tid)
{
struct wil6210_priv *wil = vif_to_wil(vif);
wil_err(wil, "ring modify is not supported for EDMA\n");
return -EOPNOTSUPP;
}
/* This function is used only for RX SW reorder */
static int wil_check_bar(struct wil6210_priv *wil, void *msg, int cid,
struct sk_buff *skb, struct wil_net_stats *stats)
{
u8 ftype;
u8 fc1;
int mid;
int tid;
u16 seq;
struct wil6210_vif *vif;
ftype = wil_rx_status_get_frame_type(wil, msg);
if (ftype == IEEE80211_FTYPE_DATA)
return 0;
fc1 = wil_rx_status_get_fc1(wil, msg);
mid = wil_rx_status_get_mid(msg);
tid = wil_rx_status_get_tid(msg);
seq = le16_to_cpu(wil_rx_status_get_seq(wil, msg));
vif = wil->vifs[mid];
if (unlikely(!vif)) {
wil_dbg_txrx(wil, "RX descriptor with invalid mid %d", mid);
return -EAGAIN;
}
wil_dbg_txrx(wil,
"Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
fc1, mid, cid, tid, seq);
if (stats)
stats->rx_non_data_frame++;
if (wil_is_back_req(fc1)) {
wil_dbg_txrx(wil,
"BAR: MID %d CID %d TID %d Seq 0x%03x\n",
mid, cid, tid, seq);
wil_rx_bar(wil, vif, cid, tid, seq);
} else {
u32 sz = wil->use_compressed_rx_status ?
sizeof(struct wil_rx_status_compressed) :
sizeof(struct wil_rx_status_extended);
/* Print all the info again. One can enable only this print,
 * without the overhead of printing every Rx frame.
 */
wil_dbg_txrx(wil,
"Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
fc1, mid, cid, tid, seq);
wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
(const void *)msg, sz, false);
wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb_headlen(skb), false);
}
return -EAGAIN;
}
static int wil_rx_error_check_edma(struct wil6210_priv *wil,
struct sk_buff *skb,
struct wil_net_stats *stats)
{
int l2_rx_status;
void *msg = wil_skb_rxstatus(skb);
l2_rx_status = wil_rx_status_get_l2_rx_status(msg);
if (l2_rx_status != 0) {
wil_dbg_txrx(wil, "L2 RX error, l2_rx_status=0x%x\n",
l2_rx_status);
/* Due to a HW issue, a KEY error will also trigger a MIC error */
if (l2_rx_status == WIL_RX_EDMA_ERROR_MIC) {
wil_err_ratelimited(wil,
"L2 MIC/KEY error, dropping packet\n");
stats->rx_mic_error++;
}
if (l2_rx_status == WIL_RX_EDMA_ERROR_KEY) {
wil_err_ratelimited(wil,
"L2 KEY error, dropping packet\n");
stats->rx_key_error++;
}
if (l2_rx_status == WIL_RX_EDMA_ERROR_REPLAY) {
wil_err_ratelimited(wil,
"L2 REPLAY error, dropping packet\n");
stats->rx_replay++;
}
if (l2_rx_status == WIL_RX_EDMA_ERROR_AMSDU) {
wil_err_ratelimited(wil,
"L2 AMSDU error, dropping packet\n");
stats->rx_amsdu_error++;
}
return -EFAULT;
}
skb->ip_summed = wil_rx_status_get_checksum(msg, stats);
return 0;
}
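/* Reap one frame from the RX status ring: validate buff_id and cid,
 * unmap and trim the skb, coalesce multi-descriptor chains until EOP,
 * and return the completed skb (or NULL once no ready status message
 * is left).
 */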
static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
struct wil_status_ring *sring)
{
struct device *dev = wil_to_dev(wil);
struct wil_rx_status_extended msg1;
void *msg = &msg1;
u16 buff_id;
struct sk_buff *skb;
dma_addr_t pa;
struct wil_ring_rx_data *rxdata = &sring->rx_data;
unsigned int sz = wil->rx_buf_len;
struct wil_net_stats *stats = NULL;
u16 dmalen;
int cid;
bool eop, headstolen;
int delta;
u8 dr_bit;
u8 data_offset;
struct wil_rx_status_extended *s;
u16 sring_idx = sring - wil->srings;
int invalid_buff_id_retry;
BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));
again:
wil_get_next_rx_status_msg(sring, &dr_bit, msg);
/* Completed handling all the ready status messages */
if (dr_bit != sring->desc_rdy_pol)
return NULL;
/* Extract the buffer ID from the status message */
buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
invalid_buff_id_retry = 0;
while (!buff_id) {
struct wil_rx_status_extended *s;
wil_dbg_txrx(wil,
"buff_id is not updated yet by HW, (swhead 0x%x)\n",
sring->swhead);
if (++invalid_buff_id_retry > MAX_INVALID_BUFF_ID_RETRY)
break;
/* Read the status message again */
s = (struct wil_rx_status_extended *)
(sring->va + (sring->elem_size * sring->swhead));
*(struct wil_rx_status_extended *)msg = *s;
buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
}
if (unlikely(!wil_val_in_range(buff_id, 1, wil->rx_buff_mgmt.size))) {
wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
buff_id, sring->swhead);
print_hex_dump(KERN_ERR, "RxS ", DUMP_PREFIX_OFFSET, 16, 1,
msg, wil->use_compressed_rx_status ?
sizeof(struct wil_rx_status_compressed) :
sizeof(struct wil_rx_status_extended), false);
wil_rx_status_reset_buff_id(sring);
wil_sring_advance_swhead(sring);
sring->invalid_buff_id_cnt++;
goto again;
}
/* Extract the SKB from the rx_buff management array */
skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
if (!skb) {
wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
wil_rx_status_reset_buff_id(sring);
/* Move the buffer from the active list to the free list */
list_move_tail(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
&wil->rx_buff_mgmt.free);
wil_sring_advance_swhead(sring);
sring->invalid_buff_id_cnt++;
goto again;
}
wil_rx_status_reset_buff_id(sring);
wil_sring_advance_swhead(sring);
memcpy(&pa, skb->cb, sizeof(pa));
dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
dmalen = le16_to_cpu(wil_rx_status_get_length(msg));
trace_wil6210_rx_status(wil, wil->use_compressed_rx_status, buff_id,
msg);
wil_dbg_txrx(wil, "Rx, buff_id=%u, sring_idx=%u, dmalen=%u bytes\n",
buff_id, sring_idx, dmalen);
wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
(const void *)msg, wil->use_compressed_rx_status ?
sizeof(struct wil_rx_status_compressed) :
sizeof(struct wil_rx_status_extended), false);
/* Move the buffer from the active list to the free list */
list_move_tail(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
&wil->rx_buff_mgmt.free);
eop = wil_rx_status_get_eop(msg);
cid = wil_rx_status_get_cid(msg);
if (unlikely(!wil_val_in_range(cid, 0, wil->max_assoc_sta))) {
wil_err(wil, "Corrupt cid=%d, sring->swhead=%d\n",
cid, sring->swhead);
rxdata->skipping = true;
goto skipping;
}
stats = &wil->sta[cid].stats;
if (unlikely(dmalen < ETH_HLEN)) {
wil_dbg_txrx(wil, "Short frame, len = %d\n", dmalen);
stats->rx_short_frame++;
rxdata->skipping = true;
goto skipping;
}
if (unlikely(dmalen > sz)) {
wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
print_hex_dump(KERN_ERR, "RxS ", DUMP_PREFIX_OFFSET, 16, 1,
msg, wil->use_compressed_rx_status ?
sizeof(struct wil_rx_status_compressed) :
sizeof(struct wil_rx_status_extended), false);
stats->rx_large_frame++;
rxdata->skipping = true;
}
skipping:
/* skipping indicates whether a certain SKB should be dropped.
 * It is set when there is an error on the current SKB or in case of
 * RX chaining: as long as we manage to merge the SKBs it stays false.
 * Once we hit a bad SKB or fail to merge SKBs, it is set to the !EOP
 * value of the current SKB. This guarantees that all the following
 * SKBs until EOP also get dropped.
 */
if (unlikely(rxdata->skipping)) {
kfree_skb(skb);
if (rxdata->skb) {
kfree_skb(rxdata->skb);
rxdata->skb = NULL;
}
rxdata->skipping = !eop;
goto again;
}
skb_trim(skb, dmalen);
prefetch(skb->data);
if (!rxdata->skb) {
rxdata->skb = skb;
} else {
if (likely(skb_try_coalesce(rxdata->skb, skb, &headstolen,
&delta))) {
kfree_skb_partial(skb, headstolen);
} else {
wil_err(wil, "failed to merge skbs!\n");
kfree_skb(skb);
kfree_skb(rxdata->skb);
rxdata->skb = NULL;
rxdata->skipping = !eop;
goto again;
}
}
if (!eop)
goto again;
/* reaching here rxdata->skb always contains a full packet */
skb = rxdata->skb;
rxdata->skb = NULL;
rxdata->skipping = false;
if (stats) {
stats->last_mcs_rx = wil_rx_status_get_mcs(msg);
if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
stats->rx_per_mcs[stats->last_mcs_rx]++;
else if (stats->last_mcs_rx == WIL_EXTENDED_MCS_26)
stats->rx_per_mcs[WIL_BASE_MCS_FOR_EXTENDED_26]++;
stats->last_cb_mode_rx = wil_rx_status_get_cb_mode(msg);
}
if (!wil->use_rx_hw_reordering && !wil->use_compressed_rx_status &&
wil_check_bar(wil, msg, cid, skb, stats) == -EAGAIN) {
kfree_skb(skb);
goto again;
}
/* Compensate for the HW data alignment according to the status
* message
*/
data_offset = wil_rx_status_get_data_offset(msg);
if (data_offset == 0xFF ||
data_offset > WIL_EDMA_MAX_DATA_OFFSET) {
wil_err(wil, "Unexpected data offset %d\n", data_offset);
kfree_skb(skb);
goto again;
}
skb_pull(skb, data_offset);
wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb_headlen(skb), false);
/* Has to be done after dma_unmap_single as skb->cb is also
* used for holding the pa
*/
s = wil_skb_rxstatus(skb);
memcpy(s, msg, sring->elem_size);
return skb;
}
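/* Drain all RX status rings within *quota, dispatching each completed
 * frame either straight to the net stack (HW reorder) or to the SW
 * reorder buffer, then update the HW tails and refill the descriptor
 * ring. Typically invoked from the NAPI RX poll handler.
 */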
void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota)
{
struct net_device *ndev;
struct wil_ring *ring = &wil->ring_rx;
struct wil_status_ring *sring;
struct sk_buff *skb;
int i;
if (unlikely(!ring->va)) {
wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
return;
}
wil_dbg_txrx(wil, "rx_handle\n");
for (i = 0; i < wil->num_rx_status_rings; i++) {
sring = &wil->srings[i];
if (unlikely(!sring->va)) {
wil_err(wil,
"Rx IRQ while Rx status ring %d not yet initialized\n",
i);
continue;
}
while ((*quota > 0) &&
(NULL != (skb =
wil_sring_reap_rx_edma(wil, sring)))) {
(*quota)--;
if (wil->use_rx_hw_reordering) {
void *msg = wil_skb_rxstatus(skb);
int mid = wil_rx_status_get_mid(msg);
struct wil6210_vif *vif = wil->vifs[mid];
if (unlikely(!vif)) {
wil_dbg_txrx(wil,
"RX desc invalid mid %d",
mid);
kfree_skb(skb);
continue;
}
ndev = vif_to_ndev(vif);
wil_netif_rx_any(skb, ndev);
} else {
wil_rx_reorder(wil, skb);
}
}
wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
}
wil_rx_refill_edma(wil);
}
static int wil_tx_desc_map_edma(union wil_tx_desc *desc,
dma_addr_t pa,
u32 len,
int ring_index)
{
struct wil_tx_enhanced_desc *d =
(struct wil_tx_enhanced_desc *)&desc->enhanced;
memset(d, 0, sizeof(struct wil_tx_enhanced_desc));
wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);
/* 0..6: mac_length; 7: ip_version 0-IP6 1-IP4 */
d->dma.length = cpu_to_le16((u16)len);
d->mac.d[0] = (ring_index << WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS);
/* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi;
* 3 - eth mode
*/
d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
(0x3 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
return 0;
}
static inline void
wil_get_next_tx_status_msg(struct wil_status_ring *sring, u8 *dr_bit,
struct wil_ring_tx_status *msg)
{
struct wil_ring_tx_status *_msg = (struct wil_ring_tx_status *)
(sring->va + (sring->elem_size * sring->swhead));
*dr_bit = _msg->desc_ready >> TX_STATUS_DESC_READY_POS;
/* make sure dr_bit is read before the rest of status msg */
rmb();
*msg = *_msg;
}
/* Clean up transmitted skbs from the Tx descriptor RING.
 * Return the number of descriptors cleared.
 */
int wil_tx_sring_handler(struct wil6210_priv *wil,
struct wil_status_ring *sring)
{
struct net_device *ndev;
struct device *dev = wil_to_dev(wil);
struct wil_ring *ring = NULL;
struct wil_ring_tx_data *txdata;
/* Total number of completed descriptors in all descriptor rings */
int desc_cnt = 0;
int cid;
struct wil_net_stats *stats;
struct wil_tx_enhanced_desc *_d;
unsigned int ring_id;
unsigned int num_descs, num_statuses = 0;
int i;
u8 dr_bit; /* Descriptor Ready bit */
struct wil_ring_tx_status msg;
struct wil6210_vif *vif;
int used_before_complete;
int used_new;
wil_get_next_tx_status_msg(sring, &dr_bit, &msg);
/* Process completion messages while DR bit has the expected polarity */
while (dr_bit == sring->desc_rdy_pol) {
num_descs = msg.num_descriptors;
if (!num_descs) {
wil_err(wil, "invalid num_descs 0\n");
goto again;
}
/* Find the corresponding descriptor ring */
ring_id = msg.ring_id;
if (unlikely(ring_id >= WIL6210_MAX_TX_RINGS)) {
wil_err(wil, "invalid ring id %d\n", ring_id);
goto again;
}
ring = &wil->ring_tx[ring_id];
if (unlikely(!ring->va)) {
wil_err(wil, "Tx irq[%d]: ring not initialized\n",
ring_id);
goto again;
}
txdata = &wil->ring_tx_data[ring_id];
if (unlikely(!txdata->enabled)) {
wil_info(wil, "Tx irq[%d]: ring disabled\n", ring_id);
goto again;
}
vif = wil->vifs[txdata->mid];
if (unlikely(!vif)) {
wil_dbg_txrx(wil, "invalid MID %d for ring %d\n",
txdata->mid, ring_id);
goto again;
}
ndev = vif_to_ndev(vif);
cid = wil->ring2cid_tid[ring_id][0];
stats = (cid < wil->max_assoc_sta) ? &wil->sta[cid].stats :
NULL;
wil_dbg_txrx(wil,
"tx_status: completed desc_ring (%d), num_descs (%d)\n",
ring_id, num_descs);
used_before_complete = wil_ring_used_tx(ring);
for (i = 0 ; i < num_descs; ++i) {
struct wil_ctx *ctx = &ring->ctx[ring->swtail];
struct wil_tx_enhanced_desc dd, *d = &dd;
u16 dmalen;
struct sk_buff *skb = ctx->skb;
_d = (struct wil_tx_enhanced_desc *)
&ring->va[ring->swtail].tx.enhanced;
*d = *_d;
dmalen = le16_to_cpu(d->dma.length);
trace_wil6210_tx_status(&msg, ring->swtail, dmalen);
wil_dbg_txrx(wil,
"TxC[%2d][%3d] : %d bytes, status 0x%02x\n",
ring_id, ring->swtail, dmalen,
msg.status);
wil_hex_dump_txrx("TxS ", DUMP_PREFIX_NONE, 32, 4,
(const void *)&msg, sizeof(msg),
false);
wil_tx_desc_unmap_edma(dev,
(union wil_tx_desc *)d,
ctx);
if (skb) {
if (likely(msg.status == 0)) {
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
if (stats) {
stats->tx_packets++;
stats->tx_bytes += skb->len;
wil_tx_latency_calc(wil, skb,
&wil->sta[cid]);
}
} else {
ndev->stats.tx_errors++;
if (stats)
stats->tx_errors++;
}
if (skb->protocol == cpu_to_be16(ETH_P_PAE))
wil_tx_complete_handle_eapol(vif, skb);
wil_consume_skb(skb, msg.status == 0);
}
memset(ctx, 0, sizeof(*ctx));
/* Make sure the ctx is zeroed before updating the tail
* to prevent a case where wil_tx_ring will see
* this descriptor as used and handle it before ctx zero
* is completed.
*/
wmb();
ring->swtail = wil_ring_next_tail(ring);
desc_cnt++;
}
/* performance monitoring */
used_new = wil_ring_used_tx(ring);
if (wil_val_in_range(wil->ring_idle_trsh,
used_new, used_before_complete)) {
wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
ring_id, used_before_complete, used_new);
txdata->last_idle = get_cycles();
}
again:
num_statuses++;
if (num_statuses % WIL_EDMA_TX_SRING_UPDATE_HW_TAIL == 0)
/* update HW tail to allow HW to push new statuses */
wil_w(wil, sring->hwtail, sring->swhead);
wil_sring_advance_swhead(sring);
wil_get_next_tx_status_msg(sring, &dr_bit, &msg);
}
/* shall we wake net queues? */
if (desc_cnt)
wil_update_net_queues(wil, vif, NULL, false);
if (num_statuses % WIL_EDMA_TX_SRING_UPDATE_HW_TAIL != 0)
/* Update the HW tail ptr (RD ptr) */
wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
return desc_cnt;
}
/* Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
* @skb is used to obtain the protocol and headers length.
* @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
* 2 - middle, 3 - last descriptor.
*/
static void wil_tx_desc_offload_setup_tso_edma(struct wil_tx_enhanced_desc *d,
int tso_desc_type, bool is_ipv4,
int tcp_hdr_len,
int skb_net_hdr_len,
int mss)
{
/* Number of descriptors */
d->mac.d[2] |= 1;
/* Maximum Segment Size */
d->mac.tso_mss |= cpu_to_le16(mss >> 2);
/* L4 header len: TCP header length */
d->dma.l4_hdr_len |= tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK;
/* EOP, TSO desc type, Segmentation enable,
* Insert IPv4 and TCP / UDP Checksum
*/
d->dma.cmd |= BIT(WIL_EDMA_DESC_TX_CFG_EOP_POS) |
tso_desc_type << WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS |
BIT(WIL_EDMA_DESC_TX_CFG_SEG_EN_POS) |
BIT(WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS) |
BIT(WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS);
/* Calculate pseudo-header */
d->dma.w1 |= BIT(WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS) |
BIT(WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS);
/* IP Header Length */
d->dma.ip_length |= skb_net_hdr_len;
/* MAC header length and IP address family*/
d->dma.b11 |= ETH_HLEN |
is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
}
static int wil_tx_tso_gen_desc(struct wil6210_priv *wil, void *buff_addr,
int len, uint i, int tso_desc_type,
skb_frag_t *frag, struct wil_ring *ring,
struct sk_buff *skb, bool is_ipv4,
int tcp_hdr_len, int skb_net_hdr_len,
int mss, int *descs_used)
{
struct device *dev = wil_to_dev(wil);
struct wil_tx_enhanced_desc *_desc = (struct wil_tx_enhanced_desc *)
&ring->va[i].tx.enhanced;
struct wil_tx_enhanced_desc desc_mem, *d = &desc_mem;
int ring_index = ring - wil->ring_tx;
dma_addr_t pa;
if (len == 0)
return 0;
if (!frag) {
pa = dma_map_single(dev, buff_addr, len, DMA_TO_DEVICE);
ring->ctx[i].mapped_as = wil_mapped_as_single;
} else {
pa = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
ring->ctx[i].mapped_as = wil_mapped_as_page;
}
if (unlikely(dma_mapping_error(dev, pa))) {
wil_err(wil, "TSO: Skb DMA map error\n");
return -EINVAL;
}
wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa,
len, ring_index);
wil_tx_desc_offload_setup_tso_edma(d, tso_desc_type, is_ipv4,
tcp_hdr_len,
skb_net_hdr_len, mss);
/* hold reference to skb
* to prevent skb release before accounting
* in case of immediate "tx done"
*/
if (tso_desc_type == wil_tso_type_lst)
ring->ctx[i].skb = skb_get(skb);
wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
*_desc = *d;
(*descs_used)++;
return 0;
}
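/* TSO TX path: descriptor 0 carries only the Ethernet/IP/TCP headers,
 * descriptor 1 the remaining linear head, and one descriptor follows
 * per page fragment; on a DMA mapping error all descriptors built so
 * far are unmapped again.
 */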
static int __wil_tx_ring_tso_edma(struct wil6210_priv *wil,
struct wil6210_vif *vif,
struct wil_ring *ring,
struct sk_buff *skb)
{
int ring_index = ring - wil->ring_tx;
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
int nr_frags = skb_shinfo(skb)->nr_frags;
int min_desc_required = nr_frags + 2; /* Headers, Head, Fragments */
int used, avail = wil_ring_avail_tx(ring);
int f, hdrlen, headlen;
int gso_type;
bool is_ipv4;
u32 swhead = ring->swhead;
int descs_used = 0; /* total number of used descriptors */
int rc = -EINVAL;
int tcp_hdr_len;
int skb_net_hdr_len;
int mss = skb_shinfo(skb)->gso_size;
wil_dbg_txrx(wil, "tx_ring_tso: %d bytes to ring %d\n", skb->len,
ring_index);
if (unlikely(!txdata->enabled))
return -EINVAL;
if (unlikely(avail < min_desc_required)) {
wil_err_ratelimited(wil,
"TSO: Tx ring[%2d] full. No space for %d fragments\n",
ring_index, min_desc_required);
return -ENOMEM;
}
gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
switch (gso_type) {
case SKB_GSO_TCPV4:
is_ipv4 = true;
break;
case SKB_GSO_TCPV6:
is_ipv4 = false;
break;
default:
return -EINVAL;
}
if (skb->ip_summed != CHECKSUM_PARTIAL)
return -EINVAL;
/* tcp header length and skb network header length are fixed for all
* packet's descriptors - read them once here
*/
tcp_hdr_len = tcp_hdrlen(skb);
skb_net_hdr_len = skb_network_header_len(skb);
/* First descriptor must contain the header only
* Header Length = MAC header len + IP header len + TCP header len
*/
hdrlen = ETH_HLEN + tcp_hdr_len + skb_net_hdr_len;
wil_dbg_txrx(wil, "TSO: process header descriptor, hdrlen %u\n",
hdrlen);
rc = wil_tx_tso_gen_desc(wil, skb->data, hdrlen, swhead,
wil_tso_type_hdr, NULL, ring, skb,
is_ipv4, tcp_hdr_len, skb_net_hdr_len,
mss, &descs_used);
if (rc)
return -EINVAL;
/* Second descriptor contains the head */
headlen = skb_headlen(skb) - hdrlen;
wil_dbg_txrx(wil, "TSO: process skb head, headlen %u\n", headlen);
rc = wil_tx_tso_gen_desc(wil, skb->data + hdrlen, headlen,
(swhead + descs_used) % ring->size,
(nr_frags != 0) ? wil_tso_type_first :
wil_tso_type_lst, NULL, ring, skb,
is_ipv4, tcp_hdr_len, skb_net_hdr_len,
mss, &descs_used);
if (rc)
goto mem_error;
/* Rest of the descriptors are from the SKB fragments */
for (f = 0; f < nr_frags; f++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
int len = skb_frag_size(frag);
wil_dbg_txrx(wil, "TSO: frag[%d]: len %u, descs_used %d\n", f,
len, descs_used);
rc = wil_tx_tso_gen_desc(wil, NULL, len,
(swhead + descs_used) % ring->size,
(f != nr_frags - 1) ?
wil_tso_type_mid : wil_tso_type_lst,
frag, ring, skb, is_ipv4,
tcp_hdr_len, skb_net_hdr_len,
mss, &descs_used);
if (rc)
goto mem_error;
}
/* performance monitoring */
used = wil_ring_used_tx(ring);
if (wil_val_in_range(wil->ring_idle_trsh,
used, used + descs_used)) {
txdata->idle += get_cycles() - txdata->last_idle;
wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
ring_index, used, used + descs_used);
}
/* advance swhead */
wil_ring_advance_head(ring, descs_used);
wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, ring->swhead);
/* make sure all writes to descriptors (shared memory) are done before
* committing them to HW
*/
wmb();
if (wil->tx_latency)
*(ktime_t *)&skb->cb = ktime_get();
else
memset(skb->cb, 0, sizeof(ktime_t));
wil_w(wil, ring->hwtail, ring->swhead);
return 0;
mem_error:
while (descs_used > 0) {
struct device *dev = wil_to_dev(wil);
struct wil_ctx *ctx;
int i = (swhead + descs_used - 1) % ring->size;
struct wil_tx_enhanced_desc dd, *d = &dd;
struct wil_tx_enhanced_desc *_desc =
(struct wil_tx_enhanced_desc *)
&ring->va[i].tx.enhanced;
*d = *_desc;
ctx = &ring->ctx[i];
wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
memset(ctx, 0, sizeof(*ctx));
descs_used--;
}
return rc;
}
static int wil_ring_init_bcast_edma(struct wil6210_vif *vif, int ring_id,
int size)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wil_ring *ring = &wil->ring_tx[ring_id];
int rc;
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
wil_dbg_misc(wil, "init bcast: ring_id=%d, sring_id=%d\n",
ring_id, wil->tx_sring_idx);
lockdep_assert_held(&wil->mutex);
wil_tx_data_init(txdata);
ring->size = size;
ring->is_rx = false;
rc = wil_ring_alloc_desc_ring(wil, ring);
if (rc)
goto out;
wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID; /* CID */
wil->ring2cid_tid[ring_id][1] = 0; /* TID */
if (!vif->privacy)
txdata->dot1x_open = true;
rc = wil_wmi_bcast_desc_ring_add(vif, ring_id);
if (rc)
goto out_free;
return 0;
out_free:
spin_lock_bh(&txdata->lock);
txdata->enabled = 0;
txdata->dot1x_open = false;
spin_unlock_bh(&txdata->lock);
wil_ring_free_edma(wil, ring);
out:
return rc;
}
static void wil_tx_fini_edma(struct wil6210_priv *wil)
{
struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];
wil_dbg_misc(wil, "free TX sring\n");
wil_sring_free(wil, sring);
}
static void wil_rx_data_free(struct wil_status_ring *sring)
{
if (!sring)
return;
kfree_skb(sring->rx_data.skb);
sring->rx_data.skb = NULL;
}
static void wil_rx_fini_edma(struct wil6210_priv *wil)
{
struct wil_ring *ring = &wil->ring_rx;
int i;
wil_dbg_misc(wil, "rx_fini_edma\n");
wil_ring_free_edma(wil, ring);
for (i = 0; i < wil->num_rx_status_rings; i++) {
wil_rx_data_free(&wil->srings[i]);
wil_sring_free(wil, &wil->srings[i]);
}
wil_free_rx_buff_arr(wil);
}
void wil_init_txrx_ops_edma(struct wil6210_priv *wil)
{
wil->txrx_ops.configure_interrupt_moderation =
wil_configure_interrupt_moderation_edma;
/* TX ops */
wil->txrx_ops.ring_init_tx = wil_ring_init_tx_edma;
wil->txrx_ops.ring_fini_tx = wil_ring_free_edma;
wil->txrx_ops.ring_init_bcast = wil_ring_init_bcast_edma;
wil->txrx_ops.tx_init = wil_tx_init_edma;
wil->txrx_ops.tx_fini = wil_tx_fini_edma;
wil->txrx_ops.tx_desc_map = wil_tx_desc_map_edma;
wil->txrx_ops.tx_desc_unmap = wil_tx_desc_unmap_edma;
wil->txrx_ops.tx_ring_tso = __wil_tx_ring_tso_edma;
wil->txrx_ops.tx_ring_modify = wil_tx_ring_modify_edma;
/* RX ops */
wil->txrx_ops.rx_init = wil_rx_init_edma;
wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp_edma;
wil->txrx_ops.get_reorder_params = wil_get_reorder_params_edma;
wil->txrx_ops.get_netif_rx_params = wil_get_netif_rx_params_edma;
wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check_edma;
wil->txrx_ops.rx_error_check = wil_rx_error_check_edma;
wil->txrx_ops.is_rx_idle = wil_is_rx_idle_edma;
wil->txrx_ops.rx_fini = wil_rx_fini_edma;
}
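/* Editor's note (illustrative, not driver code): with the ops table filled
 * in above, callers stay agnostic of the DMA generation and dispatch
 * through the table, e.g.:
 *
 *	if (skb_is_gso(skb))
 *		rc = wil->txrx_ops.tx_ring_tso(wil, vif, ring, skb);
 */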
|
linux-master
|
drivers/net/wireless/ath/wil6210/txrx_edma.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include "wmi.h"
#include "wil6210.h"
#include "txrx.h"
#include "pmc.h"
struct desc_alloc_info {
dma_addr_t pa;
void *va;
};
static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
{
return !!pmc->pring_va;
}
void wil_pmc_init(struct wil6210_priv *wil)
{
memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
mutex_init(&wil->pmc.lock);
}
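/* Editor's sketch (hypothetical helper, not used by the driver): a dword of
 * a pmc descriptor buffer still carries the init pattern written by
 * wil_pmc_alloc() below if and only if HW never overwrote it:
 */
static inline bool wil_pmc_dw_untouched(u32 dw, u32 idx)
{
	return dw == (PCM_DATA_INVALID_DW_VAL | idx);
}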
/* Allocate the physical ring (p-ring) and the required
 * number of descriptors of the required size.
 * Initialize the descriptors as required by pmc dma.
 * Each dword of a descriptor's buffer is initialized to hold its
 * serial number in the LSW and the reserved value
 * PCM_DATA_INVALID_DW_VAL in the MSW.
 */
void wil_pmc_alloc(struct wil6210_priv *wil,
int num_descriptors,
int descriptor_size)
{
u32 i;
struct pmc_ctx *pmc = &wil->pmc;
struct device *dev = wil_to_dev(wil);
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
struct wmi_pmc_cmd pmc_cmd = {0};
int last_cmd_err = -ENOMEM;
mutex_lock(&pmc->lock);
if (wil_is_pmc_allocated(pmc)) {
/* sanity check */
wil_err(wil, "ERROR pmc is already allocated\n");
goto no_release_err;
}
if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
wil_err(wil,
"Invalid params num_descriptors(%d), descriptor_size(%d)\n",
num_descriptors, descriptor_size);
last_cmd_err = -EINVAL;
goto no_release_err;
}
if (num_descriptors > (1 << WIL_RING_SIZE_ORDER_MAX)) {
wil_err(wil,
"num_descriptors(%d) exceeds max ring size %d\n",
num_descriptors, 1 << WIL_RING_SIZE_ORDER_MAX);
last_cmd_err = -EINVAL;
goto no_release_err;
}
if (num_descriptors > INT_MAX / descriptor_size) {
wil_err(wil,
"Overflow in num_descriptors(%d)*descriptor_size(%d)\n",
num_descriptors, descriptor_size);
last_cmd_err = -EINVAL;
goto no_release_err;
}
pmc->num_descriptors = num_descriptors;
pmc->descriptor_size = descriptor_size;
wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n",
num_descriptors, descriptor_size);
/* allocate descriptors info list in pmc context */
pmc->descriptors = kcalloc(num_descriptors,
sizeof(struct desc_alloc_info),
GFP_KERNEL);
if (!pmc->descriptors) {
wil_err(wil, "ERROR allocating pmc skb list\n");
goto no_release_err;
}
wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n",
pmc->descriptors);
/* Allocate pring buffer and descriptors.
 * vring->va should be aligned on its size rounded up to power of 2
 * This is guaranteed by dma_alloc_coherent.
 *
 * HW has a limitation that all vring addresses must share the same
 * upper 16 bits of the 48-bit address. To work around that, if we are
 * using more than 32-bit addresses, switch to a 32-bit mask before
 * allocating the vring memory.
 *
 * There's no check on the return value of dma_set_mask_and_coherent,
 * since we assume that if we were able to set the mask during
 * initialization on this system, it will not fail if we set it again
 */
if (wil->dma_addr_size > 32)
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
pmc->pring_va = dma_alloc_coherent(dev,
sizeof(struct vring_tx_desc) * num_descriptors,
&pmc->pring_pa,
GFP_KERNEL);
if (wil->dma_addr_size > 32)
dma_set_mask_and_coherent(dev,
DMA_BIT_MASK(wil->dma_addr_size));
wil_dbg_misc(wil,
"pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
pmc->pring_va, &pmc->pring_pa,
sizeof(struct vring_tx_desc),
num_descriptors,
sizeof(struct vring_tx_desc) * num_descriptors);
if (!pmc->pring_va) {
wil_err(wil, "ERROR allocating pmc pring\n");
goto release_pmc_skb_list;
}
/* initially, all descriptors are SW owned
 * For Tx, Rx, and PMC, the ownership bit is at the same location, so
 * any of the layouts can be used
 */
for (i = 0; i < num_descriptors; i++) {
struct vring_tx_desc *_d = &pmc->pring_va[i];
struct vring_tx_desc dd = {}, *d = &dd;
int j = 0;
pmc->descriptors[i].va = dma_alloc_coherent(dev,
descriptor_size,
&pmc->descriptors[i].pa,
GFP_KERNEL);
if (unlikely(!pmc->descriptors[i].va)) {
wil_err(wil, "ERROR allocating pmc descriptor %d", i);
goto release_pmc_skbs;
}
for (j = 0; j < descriptor_size / sizeof(u32); j++) {
u32 *p = (u32 *)pmc->descriptors[i].va + j;
*p = PCM_DATA_INVALID_DW_VAL | j;
}
/* configure dma descriptor */
d->dma.addr.addr_low =
cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
d->dma.addr.addr_high =
cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
d->dma.status = 0; /* 0 = HW_OWNED */
d->dma.length = cpu_to_le16(descriptor_size);
d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
*_d = *d;
}
wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n");
pmc_cmd.op = WMI_PMC_ALLOCATE;
pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);
wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n");
pmc->last_cmd_status = wmi_send(wil,
WMI_PMC_CMDID,
vif->mid,
&pmc_cmd,
sizeof(pmc_cmd));
if (pmc->last_cmd_status) {
wil_err(wil,
"WMI_PMC_CMD with ALLOCATE op failed with status %d",
pmc->last_cmd_status);
goto release_pmc_skbs;
}
mutex_unlock(&pmc->lock);
return;
release_pmc_skbs:
wil_err(wil, "exit on error: Releasing skbs...\n");
for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
dma_free_coherent(dev,
descriptor_size,
pmc->descriptors[i].va,
pmc->descriptors[i].pa);
pmc->descriptors[i].va = NULL;
}
wil_err(wil, "exit on error: Releasing pring...\n");
dma_free_coherent(dev,
sizeof(struct vring_tx_desc) * num_descriptors,
pmc->pring_va,
pmc->pring_pa);
pmc->pring_va = NULL;
release_pmc_skb_list:
wil_err(wil, "exit on error: Releasing descriptors info list...\n");
kfree(pmc->descriptors);
pmc->descriptors = NULL;
no_release_err:
pmc->last_cmd_status = last_cmd_err;
mutex_unlock(&pmc->lock);
}
/* Traverse the p-ring and release all buffers.
 * At the end, release the p-ring memory
 */
void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
{
struct pmc_ctx *pmc = &wil->pmc;
struct device *dev = wil_to_dev(wil);
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
struct wmi_pmc_cmd pmc_cmd = {0};
mutex_lock(&pmc->lock);
pmc->last_cmd_status = 0;
if (!wil_is_pmc_allocated(pmc)) {
wil_dbg_misc(wil,
"pmc_free: Error, can't free - not allocated\n");
pmc->last_cmd_status = -EPERM;
mutex_unlock(&pmc->lock);
return;
}
if (send_pmc_cmd) {
wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n");
pmc_cmd.op = WMI_PMC_RELEASE;
pmc->last_cmd_status =
wmi_send(wil, WMI_PMC_CMDID, vif->mid,
&pmc_cmd, sizeof(pmc_cmd));
if (pmc->last_cmd_status) {
wil_err(wil,
"WMI_PMC_CMD with RELEASE op failed, status %d",
pmc->last_cmd_status);
/* There's nothing we can do with this error.
 * Normally, it should never occur.
 * Continue freeing all memory allocated for the pmc.
 */
}
}
if (pmc->pring_va) {
size_t buf_size = sizeof(struct vring_tx_desc) *
pmc->num_descriptors;
wil_dbg_misc(wil, "pmc_free: free pring va %p\n",
pmc->pring_va);
dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);
pmc->pring_va = NULL;
} else {
pmc->last_cmd_status = -ENOENT;
}
if (pmc->descriptors) {
int i;
for (i = 0;
i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
dma_free_coherent(dev,
pmc->descriptor_size,
pmc->descriptors[i].va,
pmc->descriptors[i].pa);
pmc->descriptors[i].va = NULL;
}
wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i,
pmc->num_descriptors);
wil_dbg_misc(wil,
"pmc_free: free pmc descriptors info list %p\n",
pmc->descriptors);
kfree(pmc->descriptors);
pmc->descriptors = NULL;
} else {
pmc->last_cmd_status = -ENOENT;
}
mutex_unlock(&pmc->lock);
}
/* Status of the last operation requested via debugfs: alloc/free/read.
 * 0 on success or a negative errno
 */
int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
{
wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n",
wil->pmc.last_cmd_status);
return wil->pmc.last_cmd_status;
}
/* Read from the requested position up to the end of the current
 * descriptor; the descriptor size is the one configured during the
 * alloc request.
 */
ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
loff_t *f_pos)
{
struct wil6210_priv *wil = filp->private_data;
struct pmc_ctx *pmc = &wil->pmc;
size_t retval = 0;
unsigned long long idx;
loff_t offset;
size_t pmc_size;
mutex_lock(&pmc->lock);
if (!wil_is_pmc_allocated(pmc)) {
wil_err(wil, "error, pmc is not allocated!\n");
pmc->last_cmd_status = -EPERM;
mutex_unlock(&pmc->lock);
return -EPERM;
}
pmc_size = pmc->descriptor_size * pmc->num_descriptors;
wil_dbg_misc(wil,
"pmc_read: size %u, pos %lld\n",
(u32)count, *f_pos);
pmc->last_cmd_status = 0;
idx = *f_pos;
do_div(idx, pmc->descriptor_size);
offset = *f_pos - (idx * pmc->descriptor_size);
if (*f_pos >= pmc_size) {
wil_dbg_misc(wil,
"pmc_read: reached end of pmc buf: %lld >= %u\n",
*f_pos, (u32)pmc_size);
pmc->last_cmd_status = -ERANGE;
goto out;
}
wil_dbg_misc(wil,
"pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
*f_pos, idx, offset, count);
/* if no errors, return the copied byte count */
retval = simple_read_from_buffer(buf,
count,
&offset,
pmc->descriptors[idx].va,
pmc->descriptor_size);
*f_pos += retval;
out:
mutex_unlock(&pmc->lock);
return retval;
}
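/* Editor's note (worked example, not driver code): with descriptor_size
 * 2048, a file position of 5000 maps to idx = 5000 / 2048 = 2 and
 * offset = 5000 - 2 * 2048 = 904, i.e. the read starts 904 bytes into the
 * third descriptor and is clipped at that descriptor's end by
 * simple_read_from_buffer().
 */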
loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
{
loff_t newpos;
struct wil6210_priv *wil = filp->private_data;
struct pmc_ctx *pmc = &wil->pmc;
size_t pmc_size;
mutex_lock(&pmc->lock);
if (!wil_is_pmc_allocated(pmc)) {
wil_err(wil, "error, pmc is not allocated!\n");
pmc->last_cmd_status = -EPERM;
mutex_unlock(&pmc->lock);
return -EPERM;
}
pmc_size = pmc->descriptor_size * pmc->num_descriptors;
switch (whence) {
case 0: /* SEEK_SET */
newpos = off;
break;
case 1: /* SEEK_CUR */
newpos = filp->f_pos + off;
break;
case 2: /* SEEK_END */
newpos = pmc_size;
break;
default: /* can't happen */
newpos = -EINVAL;
goto out;
}
if (newpos < 0) {
newpos = -EINVAL;
goto out;
}
if (newpos > pmc_size)
newpos = pmc_size;
filp->f_pos = newpos;
out:
mutex_unlock(&pmc->lock);
return newpos;
}
int wil_pmcring_read(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
struct pmc_ctx *pmc = &wil->pmc;
size_t pmc_ring_size =
sizeof(struct vring_rx_desc) * pmc->num_descriptors;
mutex_lock(&pmc->lock);
if (!wil_is_pmc_allocated(pmc)) {
wil_err(wil, "error, pmc is not allocated!\n");
pmc->last_cmd_status = -EPERM;
mutex_unlock(&pmc->lock);
return -EPERM;
}
wil_dbg_misc(wil, "pmcring_read: size %zu\n", pmc_ring_size);
seq_write(s, pmc->pring_va, pmc_ring_size);
mutex_unlock(&pmc->lock);
return 0;
}
|
linux-master
|
drivers/net/wireless/ath/wil6210/pmc.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/interrupt.h>
#include "wil6210.h"
#include "trace.h"
/*
 * Theory of operation:
 *
 * There is an ISR pseudo-cause register,
 * dma_rgf->DMA_RGF.PSEUDO_CAUSE.PSEUDO_CAUSE
 * Its bits represent the OR'ed bits from the 3 real ISR registers:
 * TX, RX, and MISC.
 *
 * Registers may be configured to either "write 1 to clear" or
 * "clear on read" mode.
 *
 * When handling an interrupt, one has to mask/unmask the interrupts for
 * the real ISR registers, or the hardware may malfunction.
 */
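/* Editor's sketch (illustrative, not driver code): the hard IRQ handler
 * further below routes on the summary bits of the pseudo-cause word, e.g.:
 *
 *	if (pseudo_cause & BIT_DMA_PSEUDO_CAUSE_RX)
 *		handle_rx();
 *	if (pseudo_cause & BIT_DMA_PSEUDO_CAUSE_TX)
 *		handle_tx();
 *	if (pseudo_cause & BIT_DMA_PSEUDO_CAUSE_MISC)
 *		handle_misc();
 */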
#define WIL6210_IRQ_DISABLE (0xFFFFFFFFUL)
#define WIL6210_IRQ_DISABLE_NO_HALP (0xF7FFFFFFUL)
#define WIL6210_IMC_RX (BIT_DMA_EP_RX_ICR_RX_DONE | \
BIT_DMA_EP_RX_ICR_RX_HTRSH)
#define WIL6210_IMC_RX_NO_RX_HTRSH (WIL6210_IMC_RX & \
(~(BIT_DMA_EP_RX_ICR_RX_HTRSH)))
#define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \
BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
#define WIL6210_IMC_TX_EDMA BIT_TX_STATUS_IRQ
#define WIL6210_IMC_RX_EDMA BIT_RX_STATUS_IRQ
#define WIL6210_IMC_MISC_NO_HALP (ISR_MISC_FW_READY | \
ISR_MISC_MBOX_EVT | \
ISR_MISC_FW_ERROR)
#define WIL6210_IMC_MISC (WIL6210_IMC_MISC_NO_HALP | \
BIT_DMA_EP_MISC_ICR_HALP)
#define WIL6210_IRQ_PSEUDO_MASK (u32)(~(BIT_DMA_PSEUDO_CAUSE_RX | \
BIT_DMA_PSEUDO_CAUSE_TX | \
BIT_DMA_PSEUDO_CAUSE_MISC))
#if defined(CONFIG_WIL6210_ISR_COR)
/* configure to Clear-On-Read mode */
#define WIL_ICR_ICC_VALUE (0xFFFFFFFFUL)
#define WIL_ICR_ICC_MISC_VALUE (0xF7FFFFFFUL)
static inline void wil_icr_clear(u32 x, void __iomem *addr)
{
}
#else /* defined(CONFIG_WIL6210_ISR_COR) */
/* configure to Write-1-to-Clear mode */
#define WIL_ICR_ICC_VALUE (0UL)
#define WIL_ICR_ICC_MISC_VALUE (0UL)
static inline void wil_icr_clear(u32 x, void __iomem *addr)
{
writel(x, addr);
}
#endif /* defined(CONFIG_WIL6210_ISR_COR) */
static inline u32 wil_ioread32_and_clear(void __iomem *addr)
{
u32 x = readl(addr);
wil_icr_clear(x, addr);
return x;
}
static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
{
wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMS),
WIL6210_IRQ_DISABLE);
}
static void wil6210_mask_irq_tx_edma(struct wil6210_priv *wil)
{
wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, IMS),
WIL6210_IRQ_DISABLE);
}
static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
{
wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMS),
WIL6210_IRQ_DISABLE);
}
static void wil6210_mask_irq_rx_edma(struct wil6210_priv *wil)
{
wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, IMS),
WIL6210_IRQ_DISABLE);
}
static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
{
wil_dbg_irq(wil, "mask_irq_misc: mask_halp(%s)\n",
mask_halp ? "true" : "false");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
mask_halp ? WIL6210_IRQ_DISABLE : WIL6210_IRQ_DISABLE_NO_HALP);
}
void wil6210_mask_halp(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "mask_halp\n");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
BIT_DMA_EP_MISC_ICR_HALP);
}
static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "mask_irq_pseudo\n");
wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_DISABLE);
clear_bit(wil_status_irqen, wil->status);
}
void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
{
wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMC),
WIL6210_IMC_TX);
}
void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil)
{
wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, IMC),
WIL6210_IMC_TX_EDMA);
}
void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
{
bool unmask_rx_htrsh = atomic_read(&wil->connected_vifs) > 0;
wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMC),
unmask_rx_htrsh ? WIL6210_IMC_RX : WIL6210_IMC_RX_NO_RX_HTRSH);
}
void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil)
{
wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, IMC),
WIL6210_IMC_RX_EDMA);
}
static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp)
{
wil_dbg_irq(wil, "unmask_irq_misc: unmask_halp(%s)\n",
unmask_halp ? "true" : "false");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
unmask_halp ? WIL6210_IMC_MISC : WIL6210_IMC_MISC_NO_HALP);
}
static void wil6210_unmask_halp(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "unmask_halp\n");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
BIT_DMA_EP_MISC_ICR_HALP);
}
static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "unmask_irq_pseudo\n");
set_bit(wil_status_irqen, wil->status);
wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_PSEUDO_MASK);
}
void wil_mask_irq(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "mask_irq\n");
wil6210_mask_irq_tx(wil);
wil6210_mask_irq_tx_edma(wil);
wil6210_mask_irq_rx(wil);
wil6210_mask_irq_rx_edma(wil);
wil6210_mask_irq_misc(wil, true);
wil6210_mask_irq_pseudo(wil);
}
void wil_unmask_irq(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "unmask_irq\n");
wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_VALUE);
wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_VALUE);
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_MISC_VALUE);
wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_VALUE);
wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_VALUE);
wil6210_unmask_irq_pseudo(wil);
if (wil->use_enhanced_dma_hw) {
wil6210_unmask_irq_tx_edma(wil);
wil6210_unmask_irq_rx_edma(wil);
} else {
wil6210_unmask_irq_tx(wil);
wil6210_unmask_irq_rx(wil);
}
wil6210_unmask_irq_misc(wil, true);
}
void wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil)
{
u32 moderation;
wil_s(wil, RGF_INT_GEN_IDLE_TIME_LIMIT, WIL_EDMA_IDLE_TIME_LIMIT_USEC);
wil_s(wil, RGF_INT_GEN_TIME_UNIT_LIMIT, WIL_EDMA_TIME_UNIT_CLK_CYCLES);
/* Update RX and TX moderation */
moderation = wil->rx_max_burst_duration |
(WIL_EDMA_AGG_WATERMARK << WIL_EDMA_AGG_WATERMARK_POS);
wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_0, moderation);
wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_1, moderation);
/* Treat special events as regular
* (set bit 0 to 0x1 and clear bits 1-8)
*/
wil_c(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1FE);
wil_s(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1);
}
void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
{
struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr;
wil_dbg_irq(wil, "configure_interrupt_moderation\n");
/* disable interrupt moderation for monitor
* to get better timestamp precision
*/
if (wdev->iftype == NL80211_IFTYPE_MONITOR)
return;
/* Disable and clear tx counter before (re)configuration */
wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
wil_w(wil, RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
wil_info(wil, "set ITR_TX_CNT_TRSH = %d usec\n",
wil->tx_max_burst_duration);
/* Configure TX max burst duration timer to use usec units */
wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL,
BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);
/* Disable and clear tx idle counter before (re)configuration */
wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
wil_info(wil, "set ITR_TX_IDL_CNT_TRSH = %d usec\n",
wil->tx_interframe_timeout);
/* Configure TX idle interval timer to use usec units */
wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);
/* Disable and clear rx counter before (re)configuration */
wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
wil_w(wil, RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
wil_info(wil, "set ITR_RX_CNT_TRSH = %d usec\n",
wil->rx_max_burst_duration);
/* Configure RX max burst duration timer to use usec units */
wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL,
BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);
/* Disable and clear rx idle counter before (re)configuration */
wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
wil_info(wil, "set ITR_RX_IDL_CNT_TRSH = %d usec\n",
wil->rx_interframe_timeout);
/* Configure RX idle interval timer to use usec units */
wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
}
static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
u32 isr;
bool need_unmask = true;
wil6210_mask_irq_rx(wil);
isr = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, ICR));
trace_wil6210_irq_rx(isr);
wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
if (unlikely(!isr)) {
wil_err_ratelimited(wil, "spurious IRQ: RX\n");
wil6210_unmask_irq_rx(wil);
return IRQ_NONE;
}
/* RX_DONE and RX_HTRSH interrupts are the same if interrupt
 * moderation is not used. Interrupt moderation may cause RX
 * buffer overflow while RX_DONE is delayed. The required
 * action is always the same - drain the accumulated
 * packets from the RX ring.
 */
if (likely(isr & (BIT_DMA_EP_RX_ICR_RX_DONE |
BIT_DMA_EP_RX_ICR_RX_HTRSH))) {
wil_dbg_irq(wil, "RX done / RX_HTRSH received, ISR (0x%x)\n",
isr);
isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE |
BIT_DMA_EP_RX_ICR_RX_HTRSH);
if (likely(test_bit(wil_status_fwready, wil->status))) {
if (likely(test_bit(wil_status_napi_en, wil->status))) {
wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
need_unmask = false;
napi_schedule(&wil->napi_rx);
} else {
wil_err_ratelimited(
wil,
"Got Rx interrupt while stopping interface\n");
}
} else {
wil_err_ratelimited(wil, "Got Rx interrupt while in reset\n");
}
}
if (unlikely(isr))
wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
/* Rx IRQ will be enabled when NAPI processing is finished */
atomic_inc(&wil->isr_count_rx);
if (unlikely(need_unmask))
wil6210_unmask_irq_rx(wil);
return IRQ_HANDLED;
}
static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
u32 isr;
bool need_unmask = true;
wil6210_mask_irq_rx_edma(wil);
isr = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_INT_GEN_RX_ICR) +
offsetof(struct RGF_ICR, ICR));
trace_wil6210_irq_rx(isr);
wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
if (unlikely(!isr)) {
wil_err(wil, "spurious IRQ: RX\n");
wil6210_unmask_irq_rx_edma(wil);
return IRQ_NONE;
}
if (likely(isr & BIT_RX_STATUS_IRQ)) {
wil_dbg_irq(wil, "RX status ring\n");
isr &= ~BIT_RX_STATUS_IRQ;
if (likely(test_bit(wil_status_fwready, wil->status))) {
if (likely(test_bit(wil_status_napi_en, wil->status))) {
wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
need_unmask = false;
napi_schedule(&wil->napi_rx);
} else {
wil_err(wil,
"Got Rx interrupt while stopping interface\n");
}
} else {
wil_err(wil, "Got Rx interrupt while in reset\n");
}
}
if (unlikely(isr))
wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
/* Rx IRQ will be enabled when NAPI processing is finished */
atomic_inc(&wil->isr_count_rx);
if (unlikely(need_unmask))
wil6210_unmask_irq_rx_edma(wil);
return IRQ_HANDLED;
}
static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
u32 isr;
bool need_unmask = true;
wil6210_mask_irq_tx_edma(wil);
isr = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_INT_GEN_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
trace_wil6210_irq_tx(isr);
wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
if (unlikely(!isr)) {
wil_err(wil, "spurious IRQ: TX\n");
wil6210_unmask_irq_tx_edma(wil);
return IRQ_NONE;
}
if (likely(isr & BIT_TX_STATUS_IRQ)) {
wil_dbg_irq(wil, "TX status ring\n");
isr &= ~BIT_TX_STATUS_IRQ;
if (likely(test_bit(wil_status_fwready, wil->status))) {
wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
need_unmask = false;
napi_schedule(&wil->napi_tx);
} else {
wil_err(wil, "Got Tx status ring IRQ while in reset\n");
}
}
if (unlikely(isr))
wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
/* Tx IRQ will be enabled when NAPI processing is finished */
atomic_inc(&wil->isr_count_tx);
if (unlikely(need_unmask))
wil6210_unmask_irq_tx_edma(wil);
return IRQ_HANDLED;
}
static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
u32 isr;
bool need_unmask = true;
wil6210_mask_irq_tx(wil);
isr = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
trace_wil6210_irq_tx(isr);
wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
if (unlikely(!isr)) {
wil_err_ratelimited(wil, "spurious IRQ: TX\n");
wil6210_unmask_irq_tx(wil);
return IRQ_NONE;
}
if (likely(isr & BIT_DMA_EP_TX_ICR_TX_DONE)) {
wil_dbg_irq(wil, "TX done\n");
isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
/* also clear all VRING interrupts */
isr &= ~(BIT(25) - 1UL);
if (likely(test_bit(wil_status_fwready, wil->status))) {
wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
need_unmask = false;
napi_schedule(&wil->napi_tx);
} else {
wil_err_ratelimited(wil, "Got Tx interrupt while in reset\n");
}
}
if (unlikely(isr))
wil_err_ratelimited(wil, "un-handled TX ISR bits 0x%08x\n",
isr);
/* Tx IRQ will be enabled when NAPI processing is finished */
atomic_inc(&wil->isr_count_tx);
if (unlikely(need_unmask))
wil6210_unmask_irq_tx(wil);
return IRQ_HANDLED;
}
static void wil_notify_fw_error(struct wil6210_priv *wil)
{
struct device *dev = &wil->main_ndev->dev;
char *envp[3] = {
[0] = "SOURCE=wil6210",
[1] = "EVENT=FW_ERROR",
[2] = NULL,
};
wil_err(wil, "Notify about firmware error\n");
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
}
static void wil_cache_mbox_regs(struct wil6210_priv *wil)
{
/* make a shadow copy of registers that should not change at run time */
wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
sizeof(struct wil6210_mbox_ctl));
wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
}
static bool wil_validate_mbox_regs(struct wil6210_priv *wil)
{
size_t min_size = sizeof(struct wil6210_mbox_hdr) +
sizeof(struct wmi_cmd_hdr);
if (wil->mbox_ctl.rx.entry_size < min_size) {
wil_err(wil, "rx mbox entry too small (%d)\n",
wil->mbox_ctl.rx.entry_size);
return false;
}
if (wil->mbox_ctl.tx.entry_size < min_size) {
wil_err(wil, "tx mbox entry too small (%d)\n",
wil->mbox_ctl.tx.entry_size);
return false;
}
return true;
}
static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
u32 isr;
wil6210_mask_irq_misc(wil, false);
isr = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICR));
trace_wil6210_irq_misc(isr);
wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr);
if (!isr) {
wil_err(wil, "spurious IRQ: MISC\n");
wil6210_unmask_irq_misc(wil, false);
return IRQ_NONE;
}
if (isr & ISR_MISC_FW_ERROR) {
u32 fw_assert_code = wil_r(wil, wil->rgf_fw_assert_code_addr);
u32 ucode_assert_code =
wil_r(wil, wil->rgf_ucode_assert_code_addr);
wil_err(wil,
"Firmware error detected, assert codes FW 0x%08x, UCODE 0x%08x\n",
fw_assert_code, ucode_assert_code);
clear_bit(wil_status_fwready, wil->status);
/*
 * do not clear @isr here - the second part is done in the thread
 * handler; there, user space gets notified, and that should be
 * done in non-atomic context
 */
}
if (isr & ISR_MISC_FW_READY) {
wil_dbg_irq(wil, "IRQ: FW ready\n");
wil_cache_mbox_regs(wil);
if (wil_validate_mbox_regs(wil))
set_bit(wil_status_mbox_ready, wil->status);
/* Actual FW readiness is indicated by the
 * WMI_FW_READY_EVENTID event
 */
isr &= ~ISR_MISC_FW_READY;
}
if (isr & BIT_DMA_EP_MISC_ICR_HALP) {
isr &= ~BIT_DMA_EP_MISC_ICR_HALP;
if (wil->halp.handle_icr) {
/* no need to handle HALP ICRs until next vote */
wil->halp.handle_icr = false;
wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n");
wil6210_mask_irq_misc(wil, true);
complete(&wil->halp.comp);
}
}
wil->isr_misc = isr;
if (isr) {
return IRQ_WAKE_THREAD;
} else {
wil6210_unmask_irq_misc(wil, false);
return IRQ_HANDLED;
}
}
static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
u32 isr = wil->isr_misc;
trace_wil6210_irq_misc_thread(isr);
wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);
if (isr & ISR_MISC_FW_ERROR) {
wil->recovery_state = fw_recovery_pending;
wil_fw_core_dump(wil);
wil_notify_fw_error(wil);
isr &= ~ISR_MISC_FW_ERROR;
if (wil->platform_ops.notify) {
wil_err(wil, "notify platform driver about FW crash");
wil->platform_ops.notify(wil->platform_handle,
WIL_PLATFORM_EVT_FW_CRASH);
} else {
wil_fw_error_recovery(wil);
}
}
if (isr & ISR_MISC_MBOX_EVT) {
wil_dbg_irq(wil, "MBOX event\n");
wmi_recv_cmd(wil);
isr &= ~ISR_MISC_MBOX_EVT;
}
if (isr)
wil_dbg_irq(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
wil->isr_misc = 0;
wil6210_unmask_irq_misc(wil, false);
/* in the non-triple-MSI case, this is done inside wil6210_thread_irq
 * because it has to be done after unmasking the pseudo-cause IRQ.
 */
if (wil->n_msi == 3 && wil->suspend_resp_rcvd) {
wil_dbg_irq(wil, "set suspend_resp_comp to true\n");
wil->suspend_resp_comp = true;
wake_up_interruptible(&wil->wq);
}
return IRQ_HANDLED;
}
/* thread IRQ handler */
static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
wil_dbg_irq(wil, "Thread IRQ\n");
/* Discover real IRQ cause */
if (wil->isr_misc)
wil6210_irq_misc_thread(irq, cookie);
wil6210_unmask_irq_pseudo(wil);
if (wil->suspend_resp_rcvd) {
wil_dbg_irq(wil, "set suspend_resp_comp to true\n");
wil->suspend_resp_comp = true;
wake_up_interruptible(&wil->wq);
}
return IRQ_HANDLED;
}
/* DEBUG
 * There is a subtle hardware bug that causes an IRQ to be raised when it
 * should be masked. It is quite rare and hard to debug.
 *
 * Catch the issue if it happens and print everything we can.
 */
static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
{
u32 icm_rx, icr_rx, imv_rx;
u32 icm_tx, icr_tx, imv_tx;
u32 icm_misc, icr_misc, imv_misc;
if (!test_bit(wil_status_irqen, wil->status)) {
if (wil->use_enhanced_dma_hw) {
icm_rx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_INT_GEN_RX_ICR) +
offsetof(struct RGF_ICR, ICM));
icr_rx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_INT_GEN_RX_ICR) +
offsetof(struct RGF_ICR, ICR));
imv_rx = wil_r(wil, RGF_INT_GEN_RX_ICR +
offsetof(struct RGF_ICR, IMV));
icm_tx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_INT_GEN_TX_ICR) +
offsetof(struct RGF_ICR, ICM));
icr_tx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_INT_GEN_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
imv_tx = wil_r(wil, RGF_INT_GEN_TX_ICR +
offsetof(struct RGF_ICR, IMV));
} else {
icm_rx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, ICM));
icr_rx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, ICR));
imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
offsetof(struct RGF_ICR, IMV));
icm_tx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICM));
icr_tx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
offsetof(struct RGF_ICR, IMV));
}
icm_misc = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICM));
icr_misc = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICR));
imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
offsetof(struct RGF_ICR, IMV));
/* HALP interrupt can be unmasked when misc interrupts are
* masked
*/
if (icr_misc & BIT_DMA_EP_MISC_ICR_HALP)
return 0;
wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
"Rx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
"Tx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
"Misc icm:icr:imv 0x%08x 0x%08x 0x%08x\n",
pseudo_cause,
icm_rx, icr_rx, imv_rx,
icm_tx, icr_tx, imv_tx,
icm_misc, icr_misc, imv_misc);
return -EINVAL;
}
return 0;
}
static irqreturn_t wil6210_hardirq(int irq, void *cookie)
{
irqreturn_t rc = IRQ_HANDLED;
struct wil6210_priv *wil = cookie;
u32 pseudo_cause = wil_r(wil, RGF_DMA_PSEUDO_CAUSE);
/* pseudo_cause is Clear-On-Read, no need to ACK */
if (unlikely((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff)))
return IRQ_NONE;
/* IRQ mask debug */
if (unlikely(wil6210_debug_irq_mask(wil, pseudo_cause)))
return IRQ_NONE;
trace_wil6210_irq_pseudo(pseudo_cause);
wil_dbg_irq(wil, "Pseudo IRQ 0x%08x\n", pseudo_cause);
wil6210_mask_irq_pseudo(wil);
/* Discover real IRQ cause
 * There are 2 possible phases for every IRQ:
 * - hard IRQ handler called right here
 * - threaded handler called later
 *
 * The hard IRQ handler reads and clears the ISR.
 *
 * If a threaded handler is requested, the hard IRQ handler
 * returns IRQ_WAKE_THREAD and saves the ISR register value
 * for the threaded handler's use.
 *
 * Voting for the wake thread - at least 1 vote is needed
 */
if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_RX) &&
(wil->txrx_ops.irq_rx(irq, cookie) == IRQ_WAKE_THREAD))
rc = IRQ_WAKE_THREAD;
if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_TX) &&
(wil->txrx_ops.irq_tx(irq, cookie) == IRQ_WAKE_THREAD))
rc = IRQ_WAKE_THREAD;
if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_MISC) &&
(wil6210_irq_misc(irq, cookie) == IRQ_WAKE_THREAD))
rc = IRQ_WAKE_THREAD;
/* if thread is requested, it will unmask IRQ */
if (rc != IRQ_WAKE_THREAD)
wil6210_unmask_irq_pseudo(wil);
return rc;
}
static int wil6210_request_3msi(struct wil6210_priv *wil, int irq)
{
int rc;
/* IRQs are in the following order:
* - Tx
* - Rx
* - Misc
*/
rc = request_irq(irq, wil->txrx_ops.irq_tx, IRQF_SHARED,
WIL_NAME "_tx", wil);
if (rc)
return rc;
rc = request_irq(irq + 1, wil->txrx_ops.irq_rx, IRQF_SHARED,
WIL_NAME "_rx", wil);
if (rc)
goto free0;
rc = request_threaded_irq(irq + 2, wil6210_irq_misc,
wil6210_irq_misc_thread,
IRQF_SHARED, WIL_NAME "_misc", wil);
if (rc)
goto free1;
return 0;
free1:
free_irq(irq + 1, wil);
free0:
free_irq(irq, wil);
return rc;
}
/* can't use wil_ioread32_and_clear because ICC value is not set yet */
static inline void wil_clear32(void __iomem *addr)
{
u32 x = readl(addr);
writel(x, addr);
}
void wil6210_clear_irq(struct wil6210_priv *wil)
{
wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, ICR));
wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_RX_ICR) +
offsetof(struct RGF_ICR, ICR));
wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICR));
wmb(); /* make sure write completed */
}
void wil6210_set_halp(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "set_halp\n");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICS),
BIT_DMA_EP_MISC_ICR_HALP);
}
void wil6210_clear_halp(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "clear_halp\n");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICR),
BIT_DMA_EP_MISC_ICR_HALP);
wil6210_unmask_halp(wil);
}
int wil6210_init_irq(struct wil6210_priv *wil, int irq)
{
int rc;
wil_dbg_misc(wil, "init_irq: %s, n_msi=%d\n",
wil->n_msi ? "MSI" : "INTx", wil->n_msi);
if (wil->use_enhanced_dma_hw) {
wil->txrx_ops.irq_tx = wil6210_irq_tx_edma;
wil->txrx_ops.irq_rx = wil6210_irq_rx_edma;
} else {
wil->txrx_ops.irq_tx = wil6210_irq_tx;
wil->txrx_ops.irq_rx = wil6210_irq_rx;
}
if (wil->n_msi == 3)
rc = wil6210_request_3msi(wil, irq);
else
rc = request_threaded_irq(irq, wil6210_hardirq,
wil6210_thread_irq,
wil->n_msi ? 0 : IRQF_SHARED,
WIL_NAME, wil);
return rc;
}
void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
{
wil_dbg_misc(wil, "fini_irq:\n");
wil_mask_irq(wil);
free_irq(irq, wil);
if (wil->n_msi == 3) {
free_irq(irq + 1, wil);
free_irq(irq + 2, wil);
}
}
|
linux-master
|
drivers/net/wireless/ath/wil6210/interrupt.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/etherdevice.h>
#include <linux/moduleparam.h>
#include <net/netlink.h>
#include <net/cfg80211.h>
#include "wil6210.h"
#include "wmi.h"
#include "fw.h"
#define WIL_MAX_ROC_DURATION_MS 5000
#define WIL_EDMG_CHANNEL_9_SUBCHANNELS (BIT(0) | BIT(1))
#define WIL_EDMG_CHANNEL_10_SUBCHANNELS (BIT(1) | BIT(2))
#define WIL_EDMG_CHANNEL_11_SUBCHANNELS (BIT(2) | BIT(3))
/* WIL_EDMG_BW_CONFIGURATION defines the allowed channel bandwidth
 * configurations as defined by IEEE 802.11 section 9.4.2.251, Table 13.
 * The value 5 allows CB1 and CB2 of adjacent channels.
 */
#define WIL_EDMG_BW_CONFIGURATION 5
/* WIL_EDMG_CHANNELS is a bitmap that indicates the 2.16 GHz channel(s) that
* are allowed to be used for EDMG transmissions in the BSS as defined by
* IEEE 802.11 section 9.4.2.251.
*/
#define WIL_EDMG_CHANNELS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
bool disable_ap_sme;
module_param(disable_ap_sme, bool, 0444);
MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME");
#ifdef CONFIG_PM
static struct wiphy_wowlan_support wil_wowlan_support = {
.flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT,
};
#endif
#define CHAN60G(_channel, _flags) { \
.band = NL80211_BAND_60GHZ, \
.center_freq = 56160 + (2160 * (_channel)), \
.hw_value = (_channel), \
.flags = (_flags), \
.max_antenna_gain = 0, \
.max_power = 40, \
}
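/* Editor's note (worked example): CHAN60G(2, 0) expands to a channel with
 * center_freq = 56160 + 2160 * 2 = 60480 MHz, i.e. the 60.48 GHz channel 2
 * of the 802.11ad channelization.
 */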
static struct ieee80211_channel wil_60ghz_channels[] = {
CHAN60G(1, 0),
CHAN60G(2, 0),
CHAN60G(3, 0),
CHAN60G(4, 0),
};
/* Rx channel bonding mode */
enum wil_rx_cb_mode {
WIL_RX_CB_MODE_DMG,
WIL_RX_CB_MODE_EDMG,
WIL_RX_CB_MODE_WIDE,
};
static int wil_rx_cb_mode_to_n_bonded(u8 cb_mode)
{
switch (cb_mode) {
case WIL_RX_CB_MODE_DMG:
case WIL_RX_CB_MODE_EDMG:
return 1;
case WIL_RX_CB_MODE_WIDE:
return 2;
default:
return 1;
}
}
static int wil_tx_cb_mode_to_n_bonded(u8 cb_mode)
{
switch (cb_mode) {
case WMI_TX_MODE_DMG:
case WMI_TX_MODE_EDMG_CB1:
return 1;
case WMI_TX_MODE_EDMG_CB2:
return 2;
default:
return 1;
}
}
static void
wil_memdup_ie(u8 **pdst, size_t *pdst_len, const u8 *src, size_t src_len)
{
kfree(*pdst);
*pdst = NULL;
*pdst_len = 0;
if (src_len > 0) {
*pdst = kmemdup(src, src_len, GFP_KERNEL);
if (*pdst)
*pdst_len = src_len;
}
}
static int wil_num_supported_channels(struct wil6210_priv *wil)
{
int num_channels = ARRAY_SIZE(wil_60ghz_channels);
if (!test_bit(WMI_FW_CAPABILITY_CHANNEL_4, wil->fw_capabilities))
num_channels--;
return num_channels;
}
void update_supported_bands(struct wil6210_priv *wil)
{
struct wiphy *wiphy = wil_to_wiphy(wil);
wil_dbg_misc(wil, "update supported bands");
wiphy->bands[NL80211_BAND_60GHZ]->n_channels =
wil_num_supported_channels(wil);
if (test_bit(WMI_FW_CAPABILITY_CHANNEL_BONDING, wil->fw_capabilities)) {
wiphy->bands[NL80211_BAND_60GHZ]->edmg_cap.channels =
WIL_EDMG_CHANNELS;
wiphy->bands[NL80211_BAND_60GHZ]->edmg_cap.bw_config =
WIL_EDMG_BW_CONFIGURATION;
}
}
/* Vendor ID to be used in vendor-specific commands and events
 * sent to user space.
 * NOTE: The authoritative place for the definition of QCA_NL80211_VENDOR_ID,
 * vendor subcmd definitions prefixed with QCA_NL80211_VENDOR_SUBCMD, and
 * qca_wlan_vendor_attr is the open source file src/common/qca-vendor.h in
 * git://w1.fi/srv/git/hostap.git; the values here are just a copy of that
 */
#define QCA_NL80211_VENDOR_ID 0x001374
#define WIL_MAX_RF_SECTORS (128)
#define WIL_CID_ALL (0xff)
enum qca_wlan_vendor_attr_rf_sector {
QCA_ATTR_MAC_ADDR = 6,
QCA_ATTR_PAD = 13,
QCA_ATTR_TSF = 29,
QCA_ATTR_DMG_RF_SECTOR_INDEX = 30,
QCA_ATTR_DMG_RF_SECTOR_TYPE = 31,
QCA_ATTR_DMG_RF_MODULE_MASK = 32,
QCA_ATTR_DMG_RF_SECTOR_CFG = 33,
QCA_ATTR_DMG_RF_SECTOR_MAX,
};
enum qca_wlan_vendor_attr_dmg_rf_sector_type {
QCA_ATTR_DMG_RF_SECTOR_TYPE_RX,
QCA_ATTR_DMG_RF_SECTOR_TYPE_TX,
QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX
};
enum qca_wlan_vendor_attr_dmg_rf_sector_cfg {
QCA_ATTR_DMG_RF_SECTOR_CFG_INVALID = 0,
QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX,
QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0,
QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1,
QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2,
QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI,
QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO,
QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16,
/* keep last */
QCA_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST,
QCA_ATTR_DMG_RF_SECTOR_CFG_MAX =
QCA_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST - 1
};
static const struct
nla_policy wil_rf_sector_policy[QCA_ATTR_DMG_RF_SECTOR_MAX + 1] = {
[QCA_ATTR_MAC_ADDR] = { .len = ETH_ALEN },
[QCA_ATTR_DMG_RF_SECTOR_INDEX] = { .type = NLA_U16 },
[QCA_ATTR_DMG_RF_SECTOR_TYPE] = { .type = NLA_U8 },
[QCA_ATTR_DMG_RF_MODULE_MASK] = { .type = NLA_U32 },
[QCA_ATTR_DMG_RF_SECTOR_CFG] = { .type = NLA_NESTED },
};
static const struct
nla_policy wil_rf_sector_cfg_policy[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1] = {
[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX] = { .type = NLA_U8 },
[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0] = { .type = NLA_U32 },
[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1] = { .type = NLA_U32 },
[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2] = { .type = NLA_U32 },
[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI] = { .type = NLA_U32 },
[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO] = { .type = NLA_U32 },
[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16] = { .type = NLA_U32 },
};
enum qca_nl80211_vendor_subcmds {
QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG = 139,
QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG = 140,
QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR = 141,
QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR = 142,
};
static int wil_rf_sector_get_cfg(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int data_len);
static int wil_rf_sector_set_cfg(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int data_len);
static int wil_rf_sector_get_selected(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int data_len);
static int wil_rf_sector_set_selected(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int data_len);
/* vendor specific commands */
static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
{
.info.vendor_id = QCA_NL80211_VENDOR_ID,
.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
.policy = wil_rf_sector_policy,
.doit = wil_rf_sector_get_cfg
},
{
.info.vendor_id = QCA_NL80211_VENDOR_ID,
.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
.policy = wil_rf_sector_policy,
.doit = wil_rf_sector_set_cfg
},
{
.info.vendor_id = QCA_NL80211_VENDOR_ID,
.info.subcmd =
QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
.policy = wil_rf_sector_policy,
.doit = wil_rf_sector_get_selected
},
{
.info.vendor_id = QCA_NL80211_VENDOR_ID,
.info.subcmd =
QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
.policy = wil_rf_sector_policy,
.doit = wil_rf_sector_set_selected
},
};
static struct ieee80211_supported_band wil_band_60ghz = {
.channels = wil_60ghz_channels,
.n_channels = ARRAY_SIZE(wil_60ghz_channels),
.ht_cap = {
.ht_supported = true,
.cap = 0, /* TODO */
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, /* TODO */
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, /* TODO */
.mcs = {
/* MCS 1..12 - SC PHY */
.rx_mask = {0xfe, 0x1f}, /* 1..12 */
.tx_params = IEEE80211_HT_MCS_TX_DEFINED, /* TODO */
},
},
};
static const struct ieee80211_txrx_stypes
wil_mgmt_stypes[NUM_NL80211_IFTYPES] = {
[NL80211_IFTYPE_STATION] = {
.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
},
[NL80211_IFTYPE_AP] = {
.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_RESP >> 4) |
BIT(IEEE80211_STYPE_ASSOC_RESP >> 4) |
BIT(IEEE80211_STYPE_DISASSOC >> 4) |
BIT(IEEE80211_STYPE_AUTH >> 4) |
BIT(IEEE80211_STYPE_REASSOC_RESP >> 4),
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
BIT(IEEE80211_STYPE_DISASSOC >> 4) |
BIT(IEEE80211_STYPE_AUTH >> 4) |
BIT(IEEE80211_STYPE_DEAUTH >> 4) |
BIT(IEEE80211_STYPE_REASSOC_REQ >> 4)
},
[NL80211_IFTYPE_P2P_CLIENT] = {
.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
},
[NL80211_IFTYPE_P2P_GO] = {
.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
},
[NL80211_IFTYPE_P2P_DEVICE] = {
.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
},
};
static const u32 wil_cipher_suites[] = {
WLAN_CIPHER_SUITE_GCMP,
};
static const char * const key_usage_str[] = {
[WMI_KEY_USE_PAIRWISE] = "PTK",
[WMI_KEY_USE_RX_GROUP] = "RX_GTK",
[WMI_KEY_USE_TX_GROUP] = "TX_GTK",
[WMI_KEY_USE_STORE_PTK] = "STORE_PTK",
[WMI_KEY_USE_APPLY_PTK] = "APPLY_PTK",
};
int wil_iftype_nl2wmi(enum nl80211_iftype type)
{
static const struct {
enum nl80211_iftype nl;
enum wmi_network_type wmi;
} __nl2wmi[] = {
{NL80211_IFTYPE_ADHOC, WMI_NETTYPE_ADHOC},
{NL80211_IFTYPE_STATION, WMI_NETTYPE_INFRA},
{NL80211_IFTYPE_AP, WMI_NETTYPE_AP},
{NL80211_IFTYPE_P2P_CLIENT, WMI_NETTYPE_P2P},
{NL80211_IFTYPE_P2P_GO, WMI_NETTYPE_P2P},
{NL80211_IFTYPE_MONITOR, WMI_NETTYPE_ADHOC}, /* FIXME */
};
uint i;
for (i = 0; i < ARRAY_SIZE(__nl2wmi); i++) {
if (__nl2wmi[i].nl == type)
return __nl2wmi[i].wmi;
}
return -EOPNOTSUPP;
}
int wil_spec2wmi_ch(u8 spec_ch, u8 *wmi_ch)
{
switch (spec_ch) {
case 1:
*wmi_ch = WMI_CHANNEL_1;
break;
case 2:
*wmi_ch = WMI_CHANNEL_2;
break;
case 3:
*wmi_ch = WMI_CHANNEL_3;
break;
case 4:
*wmi_ch = WMI_CHANNEL_4;
break;
case 5:
*wmi_ch = WMI_CHANNEL_5;
break;
case 6:
*wmi_ch = WMI_CHANNEL_6;
break;
case 9:
*wmi_ch = WMI_CHANNEL_9;
break;
case 10:
*wmi_ch = WMI_CHANNEL_10;
break;
case 11:
*wmi_ch = WMI_CHANNEL_11;
break;
case 12:
*wmi_ch = WMI_CHANNEL_12;
break;
default:
return -EINVAL;
}
return 0;
}
int wil_wmi2spec_ch(u8 wmi_ch, u8 *spec_ch)
{
switch (wmi_ch) {
case WMI_CHANNEL_1:
*spec_ch = 1;
break;
case WMI_CHANNEL_2:
*spec_ch = 2;
break;
case WMI_CHANNEL_3:
*spec_ch = 3;
break;
case WMI_CHANNEL_4:
*spec_ch = 4;
break;
case WMI_CHANNEL_5:
*spec_ch = 5;
break;
case WMI_CHANNEL_6:
*spec_ch = 6;
break;
case WMI_CHANNEL_9:
*spec_ch = 9;
break;
case WMI_CHANNEL_10:
*spec_ch = 10;
break;
case WMI_CHANNEL_11:
*spec_ch = 11;
break;
case WMI_CHANNEL_12:
*spec_ch = 12;
break;
default:
return -EINVAL;
}
return 0;
}
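/* Editor's sketch (hypothetical helper, not driver code): the two mappings
 * above are inverses for every supported channel, so a round-trip
 * self-check could read:
 */
static inline bool wil_ch_mapping_roundtrip_ok(u8 spec_ch)
{
	u8 wmi_ch, back;

	return !wil_spec2wmi_ch(spec_ch, &wmi_ch) &&
	       !wil_wmi2spec_ch(wmi_ch, &back) && back == spec_ch;
}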
int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
struct station_info *sinfo)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wmi_notify_req_cmd cmd = {
.cid = cid,
.interval_usec = 0,
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_notify_req_done_event evt;
} __packed reply;
struct wil_net_stats *stats = &wil->sta[cid].stats;
int rc;
u8 tx_mcs, rx_mcs;
u8 tx_rate_flag = RATE_INFO_FLAGS_DMG;
u8 rx_rate_flag = RATE_INFO_FLAGS_DMG;
memset(&reply, 0, sizeof(reply));
rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, vif->mid, &cmd, sizeof(cmd),
WMI_NOTIFY_REQ_DONE_EVENTID, &reply, sizeof(reply),
WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
tx_mcs = le16_to_cpu(reply.evt.bf_mcs);
wil_dbg_wmi(wil, "Link status for CID %d MID %d: {\n"
" MCS %s TSF 0x%016llx\n"
" BF status 0x%08x RSSI %d SQI %d%%\n"
" Tx Tpt %d goodput %d Rx goodput %d\n"
" Sectors(rx:tx) my %d:%d peer %d:%d\n"
" Tx mode %d}\n",
cid, vif->mid, WIL_EXTENDED_MCS_CHECK(tx_mcs),
le64_to_cpu(reply.evt.tsf), reply.evt.status,
reply.evt.rssi,
reply.evt.sqi,
le32_to_cpu(reply.evt.tx_tpt),
le32_to_cpu(reply.evt.tx_goodput),
le32_to_cpu(reply.evt.rx_goodput),
le16_to_cpu(reply.evt.my_rx_sector),
le16_to_cpu(reply.evt.my_tx_sector),
le16_to_cpu(reply.evt.other_rx_sector),
le16_to_cpu(reply.evt.other_tx_sector),
reply.evt.tx_mode);
sinfo->generation = wil->sinfo_gen;
sinfo->filled = BIT_ULL(NL80211_STA_INFO_RX_BYTES) |
BIT_ULL(NL80211_STA_INFO_TX_BYTES) |
BIT_ULL(NL80211_STA_INFO_RX_PACKETS) |
BIT_ULL(NL80211_STA_INFO_TX_PACKETS) |
BIT_ULL(NL80211_STA_INFO_RX_BITRATE) |
BIT_ULL(NL80211_STA_INFO_TX_BITRATE) |
BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) |
BIT_ULL(NL80211_STA_INFO_TX_FAILED);
if (wil->use_enhanced_dma_hw && reply.evt.tx_mode != WMI_TX_MODE_DMG) {
tx_rate_flag = RATE_INFO_FLAGS_EDMG;
rx_rate_flag = RATE_INFO_FLAGS_EDMG;
}
rx_mcs = stats->last_mcs_rx;
/* check extended MCS (12.1) and convert it into
* base MCS (7) + EXTENDED_SC_DMG flag
*/
if (tx_mcs == WIL_EXTENDED_MCS_26) {
tx_rate_flag = RATE_INFO_FLAGS_EXTENDED_SC_DMG;
tx_mcs = WIL_BASE_MCS_FOR_EXTENDED_26;
}
if (rx_mcs == WIL_EXTENDED_MCS_26) {
rx_rate_flag = RATE_INFO_FLAGS_EXTENDED_SC_DMG;
rx_mcs = WIL_BASE_MCS_FOR_EXTENDED_26;
}
sinfo->txrate.flags = tx_rate_flag;
sinfo->rxrate.flags = rx_rate_flag;
sinfo->txrate.mcs = tx_mcs;
sinfo->rxrate.mcs = rx_mcs;
sinfo->txrate.n_bonded_ch =
wil_tx_cb_mode_to_n_bonded(reply.evt.tx_mode);
sinfo->rxrate.n_bonded_ch =
wil_rx_cb_mode_to_n_bonded(stats->last_cb_mode_rx);
sinfo->rx_bytes = stats->rx_bytes;
sinfo->rx_packets = stats->rx_packets;
sinfo->rx_dropped_misc = stats->rx_dropped;
sinfo->tx_bytes = stats->tx_bytes;
sinfo->tx_packets = stats->tx_packets;
sinfo->tx_failed = stats->tx_errors;
if (test_bit(wil_vif_fwconnected, vif->status)) {
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING,
wil->fw_capabilities))
sinfo->signal = reply.evt.rssi;
else
sinfo->signal = reply.evt.sqi;
}
return rc;
}
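/* Editor's note (worked example, not driver code): if the FW reports the
 * extended MCS 12.1 (WIL_EXTENDED_MCS_26), the conversion above makes user
 * space see txrate.mcs = WIL_BASE_MCS_FOR_EXTENDED_26 together with the
 * RATE_INFO_FLAGS_EXTENDED_SC_DMG flag, instead of a raw non-standard MCS
 * index.
 */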
static int wil_cfg80211_get_station(struct wiphy *wiphy,
struct net_device *ndev,
const u8 *mac, struct station_info *sinfo)
{
struct wil6210_vif *vif = ndev_to_vif(ndev);
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
int cid = wil_find_cid(wil, vif->mid, mac);
wil_dbg_misc(wil, "get_station: %pM CID %d MID %d\n", mac, cid,
vif->mid);
if (!wil_cid_valid(wil, cid))
return -ENOENT;
rc = wil_cid_fill_sinfo(vif, cid, sinfo);
return rc;
}
/*
 * Find the @idx-th active STA for a specific MID, for the station dump.
 */
int wil_find_cid_by_idx(struct wil6210_priv *wil, u8 mid, int idx)
{
int i;
for (i = 0; i < wil->max_assoc_sta; i++) {
if (wil->sta[i].status == wil_sta_unused)
continue;
if (wil->sta[i].mid != mid)
continue;
if (idx == 0)
return i;
idx--;
}
return -ENOENT;
}
static int wil_cfg80211_dump_station(struct wiphy *wiphy,
struct net_device *dev, int idx,
u8 *mac, struct station_info *sinfo)
{
struct wil6210_vif *vif = ndev_to_vif(dev);
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
int cid = wil_find_cid_by_idx(wil, vif->mid, idx);
if (!wil_cid_valid(wil, cid))
return -ENOENT;
ether_addr_copy(mac, wil->sta[cid].addr);
wil_dbg_misc(wil, "dump_station: %pM CID %d MID %d\n", mac, cid,
vif->mid);
rc = wil_cid_fill_sinfo(vif, cid, sinfo);
return rc;
}
static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy,
struct wireless_dev *wdev)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil_dbg_misc(wil, "start_p2p_device: entered\n");
wil->p2p_dev_started = 1;
return 0;
}
static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
struct wireless_dev *wdev)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
if (!wil->p2p_dev_started)
return;
wil_dbg_misc(wil, "stop_p2p_device: entered\n");
mutex_lock(&wil->mutex);
mutex_lock(&wil->vif_mutex);
wil_p2p_stop_radio_operations(wil);
wil->p2p_dev_started = 0;
mutex_unlock(&wil->vif_mutex);
mutex_unlock(&wil->mutex);
}
static int wil_cfg80211_validate_add_iface(struct wil6210_priv *wil,
enum nl80211_iftype new_type)
{
int i;
struct wireless_dev *wdev;
struct iface_combination_params params = {
.num_different_channels = 1,
};
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
if (wil->vifs[i]) {
wdev = vif_to_wdev(wil->vifs[i]);
params.iftype_num[wdev->iftype]++;
}
}
params.iftype_num[new_type]++;
return cfg80211_check_combinations(wil->wiphy, &params);
}
static int wil_cfg80211_validate_change_iface(struct wil6210_priv *wil,
struct wil6210_vif *vif,
enum nl80211_iftype new_type)
{
int i, ret = 0;
struct wireless_dev *wdev;
struct iface_combination_params params = {
.num_different_channels = 1,
};
bool check_combos = false;
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
struct wil6210_vif *vif_pos = wil->vifs[i];
if (vif_pos && vif != vif_pos) {
wdev = vif_to_wdev(vif_pos);
params.iftype_num[wdev->iftype]++;
check_combos = true;
}
}
if (check_combos) {
params.iftype_num[new_type]++;
ret = cfg80211_check_combinations(wil->wiphy, &params);
}
return ret;
}
static struct wireless_dev *
wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name,
unsigned char name_assign_type,
enum nl80211_iftype type,
struct vif_params *params)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct net_device *ndev_main = wil->main_ndev, *ndev;
struct wil6210_vif *vif;
struct wireless_dev *p2p_wdev, *wdev;
int rc;
wil_dbg_misc(wil, "add_iface, type %d\n", type);
/* The P2P device is not a real virtual interface; it is a management-only
 * interface that shares the main interface.
 * Skip concurrency checks here.
 */
if (type == NL80211_IFTYPE_P2P_DEVICE) {
if (wil->p2p_wdev) {
wil_err(wil, "P2P_DEVICE interface already created\n");
return ERR_PTR(-EINVAL);
}
p2p_wdev = kzalloc(sizeof(*p2p_wdev), GFP_KERNEL);
if (!p2p_wdev)
return ERR_PTR(-ENOMEM);
p2p_wdev->iftype = type;
p2p_wdev->wiphy = wiphy;
/* use our primary ethernet address */
ether_addr_copy(p2p_wdev->address, ndev_main->perm_addr);
wil->p2p_wdev = p2p_wdev;
return p2p_wdev;
}
if (!wil->wiphy->n_iface_combinations) {
wil_err(wil, "virtual interfaces not supported\n");
return ERR_PTR(-EINVAL);
}
rc = wil_cfg80211_validate_add_iface(wil, type);
if (rc) {
wil_err(wil, "iface validation failed, err=%d\n", rc);
return ERR_PTR(rc);
}
vif = wil_vif_alloc(wil, name, name_assign_type, type);
if (IS_ERR(vif))
return ERR_CAST(vif);
ndev = vif_to_ndev(vif);
ether_addr_copy(ndev->perm_addr, ndev_main->perm_addr);
if (is_valid_ether_addr(params->macaddr)) {
eth_hw_addr_set(ndev, params->macaddr);
} else {
u8 addr[ETH_ALEN];
ether_addr_copy(addr, ndev_main->perm_addr);
addr[0] = (addr[0] ^ (1 << vif->mid)) | 0x2; /* locally administered */
eth_hw_addr_set(ndev, addr);
}
wdev = vif_to_wdev(vif);
ether_addr_copy(wdev->address, ndev->dev_addr);
rc = wil_vif_add(wil, vif);
if (rc)
goto out;
wil_info(wil, "added VIF, mid %d iftype %d MAC %pM\n",
vif->mid, type, wdev->address);
return wdev;
out:
wil_vif_free(vif);
return ERR_PTR(rc);
}
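/* Editor's note (worked example, not driver code): the per-VIF MAC
 * derivation above flips bit 'mid' of the first octet and sets the
 * locally-administered bit, so with perm_addr 00:11:22:33:44:55 a VIF
 * with mid 1 gets 02:11:22:33:44:55 (0x00 ^ (1 << 1) = 0x02, then | 0x2).
 */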
int wil_vif_prepare_stop(struct wil6210_vif *vif)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wireless_dev *wdev = vif_to_wdev(vif);
struct net_device *ndev;
int rc;
if (wdev->iftype != NL80211_IFTYPE_AP)
return 0;
ndev = vif_to_ndev(vif);
if (netif_carrier_ok(ndev)) {
rc = wmi_pcp_stop(vif);
if (rc) {
wil_info(wil, "failed to stop AP, status %d\n",
rc);
/* continue */
}
wil_bcast_fini(vif);
netif_carrier_off(ndev);
}
return 0;
}
static int wil_cfg80211_del_iface(struct wiphy *wiphy,
struct wireless_dev *wdev)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
int rc;
wil_dbg_misc(wil, "del_iface\n");
if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
if (wdev != wil->p2p_wdev) {
wil_err(wil, "delete of incorrect interface 0x%p\n",
wdev);
return -EINVAL;
}
wil_cfg80211_stop_p2p_device(wiphy, wdev);
wil_p2p_wdev_free(wil);
return 0;
}
if (vif->mid == 0) {
wil_err(wil, "cannot remove the main interface\n");
return -EINVAL;
}
rc = wil_vif_prepare_stop(vif);
if (rc)
goto out;
wil_info(wil, "deleted VIF, mid %d iftype %d MAC %pM\n",
vif->mid, wdev->iftype, wdev->address);
wil_vif_remove(wil, vif->mid);
out:
return rc;
}
static bool wil_is_safe_switch(enum nl80211_iftype from,
enum nl80211_iftype to)
{
if (from == NL80211_IFTYPE_STATION &&
to == NL80211_IFTYPE_P2P_CLIENT)
return true;
return false;
}
static int wil_cfg80211_change_iface(struct wiphy *wiphy,
struct net_device *ndev,
enum nl80211_iftype type,
struct vif_params *params)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wil6210_vif *vif = ndev_to_vif(ndev);
struct wireless_dev *wdev = vif_to_wdev(vif);
int rc;
bool fw_reset = false;
wil_dbg_misc(wil, "change_iface: type=%d\n", type);
if (wiphy->n_iface_combinations) {
rc = wil_cfg80211_validate_change_iface(wil, vif, type);
if (rc) {
wil_err(wil, "iface validation failed, err=%d\n", rc);
return rc;
}
}
/* do not reset FW when there are active VIFs,
* because it can cause significant disruption
*/
if (!wil_has_other_active_ifaces(wil, ndev, true, false) &&
netif_running(ndev) && !wil_is_recovery_blocked(wil) &&
!wil_is_safe_switch(wdev->iftype, type)) {
wil_dbg_misc(wil, "interface is up. resetting...\n");
mutex_lock(&wil->mutex);
__wil_down(wil);
rc = __wil_up(wil);
mutex_unlock(&wil->mutex);
if (rc)
return rc;
fw_reset = true;
}
switch (type) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
break;
case NL80211_IFTYPE_MONITOR:
if (params->flags)
wil->monitor_flags = params->flags;
break;
default:
return -EOPNOTSUPP;
}
if (vif->mid != 0 && wil_has_active_ifaces(wil, true, false)) {
if (!fw_reset)
wil_vif_prepare_stop(vif);
rc = wmi_port_delete(wil, vif->mid);
if (rc)
return rc;
rc = wmi_port_allocate(wil, vif->mid, ndev->dev_addr, type);
if (rc)
return rc;
}
wdev->iftype = type;
return 0;
}
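/* Start a scan. P2P social scans on the P2P device are diverted to
 * wil_p2p_search(); a regular scan programs the probe request SSID/IEs and
 * sends WMI_START_SCAN_CMDID with up to 4 channels. Note that cfg80211
 * channel numbers (hw_value) are 1-based while the WMI channel list is
 * 0-based.
 */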
static int wil_cfg80211_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *request)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wireless_dev *wdev = request->wdev;
struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
struct {
struct wmi_start_scan_cmd cmd;
u16 chnl[4];
} __packed cmd;
uint i, n;
int rc;
wil_dbg_misc(wil, "scan: wdev=0x%p iftype=%d\n", wdev, wdev->iftype);
/* scan is supported on client interfaces and on the AP interface */
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_DEVICE:
case NL80211_IFTYPE_AP:
break;
default:
return -EOPNOTSUPP;
}
/* FW doesn't support scan after a connection attempt */
if (test_bit(wil_status_dontscan, wil->status)) {
wil_err(wil, "Can't scan now\n");
return -EBUSY;
}
mutex_lock(&wil->mutex);
mutex_lock(&wil->vif_mutex);
if (vif->scan_request || vif->p2p.discovery_started) {
wil_err(wil, "Already scanning\n");
mutex_unlock(&wil->vif_mutex);
rc = -EAGAIN;
goto out;
}
mutex_unlock(&wil->vif_mutex);
if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
if (!wil->p2p_dev_started) {
wil_err(wil, "P2P search requested on stopped P2P device\n");
rc = -EIO;
goto out;
}
/* social scan on P2P_DEVICE is handled as p2p search */
if (wil_p2p_is_social_scan(request)) {
vif->scan_request = request;
if (vif->mid == 0)
wil->radio_wdev = wdev;
rc = wil_p2p_search(vif, request);
if (rc) {
if (vif->mid == 0)
wil->radio_wdev =
wil->main_ndev->ieee80211_ptr;
vif->scan_request = NULL;
}
goto out;
}
}
(void)wil_p2p_stop_discovery(vif);
wil_dbg_misc(wil, "Start scan_request 0x%p\n", request);
wil_dbg_misc(wil, "SSID count: %d", request->n_ssids);
for (i = 0; i < request->n_ssids; i++) {
wil_dbg_misc(wil, "SSID[%d]", i);
wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
request->ssids[i].ssid,
request->ssids[i].ssid_len, true);
}
if (request->n_ssids)
rc = wmi_set_ssid(vif, request->ssids[0].ssid_len,
request->ssids[0].ssid);
else
rc = wmi_set_ssid(vif, 0, NULL);
if (rc) {
wil_err(wil, "set SSID for scan request failed: %d\n", rc);
goto out;
}
vif->scan_request = request;
mod_timer(&vif->scan_timer, jiffies + WIL6210_SCAN_TO);
memset(&cmd, 0, sizeof(cmd));
cmd.cmd.scan_type = WMI_ACTIVE_SCAN;
cmd.cmd.num_channels = 0;
n = min(request->n_channels, 4U);
for (i = 0; i < n; i++) {
int ch = request->channels[i]->hw_value;
if (ch == 0) {
wil_err(wil,
"Scan requested for unknown frequency %dMhz\n",
request->channels[i]->center_freq);
continue;
}
/* 0-based channel indexes */
cmd.cmd.channel_list[cmd.cmd.num_channels++].channel = ch - 1;
wil_dbg_misc(wil, "Scan for ch %d : %d MHz\n", ch,
request->channels[i]->center_freq);
}
if (request->ie_len)
wil_hex_dump_misc("Scan IE ", DUMP_PREFIX_OFFSET, 16, 1,
request->ie, request->ie_len, true);
else
wil_dbg_misc(wil, "Scan has no IE's\n");
rc = wmi_set_ie(vif, WMI_FRAME_PROBE_REQ,
request->ie_len, request->ie);
if (rc)
goto out_restore;
if (wil->discovery_mode && cmd.cmd.scan_type == WMI_ACTIVE_SCAN) {
cmd.cmd.discovery_mode = 1;
wil_dbg_misc(wil, "active scan with discovery_mode=1\n");
}
if (vif->mid == 0)
wil->radio_wdev = wdev;
rc = wmi_send(wil, WMI_START_SCAN_CMDID, vif->mid,
&cmd, sizeof(cmd.cmd) +
cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
out_restore:
if (rc) {
del_timer_sync(&vif->scan_timer);
if (vif->mid == 0)
wil->radio_wdev = wil->main_ndev->ieee80211_ptr;
vif->scan_request = NULL;
}
out:
mutex_unlock(&wil->mutex);
return rc;
}
static void wil_cfg80211_abort_scan(struct wiphy *wiphy,
struct wireless_dev *wdev)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
wil_dbg_misc(wil, "wdev=0x%p iftype=%d\n", wdev, wdev->iftype);
mutex_lock(&wil->mutex);
mutex_lock(&wil->vif_mutex);
if (!vif->scan_request)
goto out;
if (wdev != vif->scan_request->wdev) {
wil_dbg_misc(wil, "abort scan was called on the wrong iface\n");
goto out;
}
if (wdev == wil->p2p_wdev && wil->radio_wdev == wil->p2p_wdev)
wil_p2p_stop_radio_operations(wil);
else
wil_abort_scan(vif, true);
out:
mutex_unlock(&wil->vif_mutex);
mutex_unlock(&wil->mutex);
}
static void wil_print_crypto(struct wil6210_priv *wil,
struct cfg80211_crypto_settings *c)
{
int i, n;
wil_dbg_misc(wil, "WPA versions: 0x%08x cipher group 0x%08x\n",
c->wpa_versions, c->cipher_group);
wil_dbg_misc(wil, "Pairwise ciphers [%d] {\n", c->n_ciphers_pairwise);
n = min_t(int, c->n_ciphers_pairwise, ARRAY_SIZE(c->ciphers_pairwise));
for (i = 0; i < n; i++)
wil_dbg_misc(wil, " [%d] = 0x%08x\n", i,
c->ciphers_pairwise[i]);
wil_dbg_misc(wil, "}\n");
wil_dbg_misc(wil, "AKM suites [%d] {\n", c->n_akm_suites);
n = min_t(int, c->n_akm_suites, ARRAY_SIZE(c->akm_suites));
for (i = 0; i < n; i++)
wil_dbg_misc(wil, " [%d] = 0x%08x\n", i,
c->akm_suites[i]);
wil_dbg_misc(wil, "}\n");
wil_dbg_misc(wil, "Control port : %d, eth_type 0x%04x no_encrypt %d\n",
c->control_port, be16_to_cpu(c->control_port_ethertype),
c->control_port_no_encrypt);
}
static const char *
wil_get_auth_type_name(enum nl80211_auth_type auth_type)
{
switch (auth_type) {
case NL80211_AUTHTYPE_OPEN_SYSTEM:
return "OPEN_SYSTEM";
case NL80211_AUTHTYPE_SHARED_KEY:
return "SHARED_KEY";
case NL80211_AUTHTYPE_FT:
return "FT";
case NL80211_AUTHTYPE_NETWORK_EAP:
return "NETWORK_EAP";
case NL80211_AUTHTYPE_SAE:
return "SAE";
case NL80211_AUTHTYPE_AUTOMATIC:
return "AUTOMATIC";
default:
return "unknown";
}
}
static void wil_print_connect_params(struct wil6210_priv *wil,
struct cfg80211_connect_params *sme)
{
wil_info(wil, "Connecting to:\n");
if (sme->channel) {
wil_info(wil, " Channel: %d freq %d\n",
sme->channel->hw_value, sme->channel->center_freq);
}
if (sme->bssid)
wil_info(wil, " BSSID: %pM\n", sme->bssid);
if (sme->ssid)
print_hex_dump(KERN_INFO, " SSID: ", DUMP_PREFIX_OFFSET,
16, 1, sme->ssid, sme->ssid_len, true);
if (sme->prev_bssid)
wil_info(wil, " Previous BSSID=%pM\n", sme->prev_bssid);
wil_info(wil, " Auth Type: %s\n",
wil_get_auth_type_name(sme->auth_type));
wil_info(wil, " Privacy: %s\n", sme->privacy ? "secure" : "open");
wil_info(wil, " PBSS: %d\n", sme->pbss);
wil_print_crypto(wil, &sme->crypto);
}
static int wil_ft_connect(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wil6210_vif *vif = ndev_to_vif(ndev);
struct wmi_ft_auth_cmd auth_cmd;
int rc;
if (!test_bit(WMI_FW_CAPABILITY_FT_ROAMING, wil->fw_capabilities)) {
wil_err(wil, "FT: FW does not support FT roaming\n");
return -EOPNOTSUPP;
}
if (!sme->prev_bssid) {
wil_err(wil, "FT: prev_bssid was not set\n");
return -EINVAL;
}
if (ether_addr_equal(sme->prev_bssid, sme->bssid)) {
wil_err(wil, "FT: can not roam to same AP\n");
return -EINVAL;
}
if (!test_bit(wil_vif_fwconnected, vif->status)) {
wil_err(wil, "FT: roam while not connected\n");
return -EINVAL;
}
if (vif->privacy != sme->privacy) {
wil_err(wil, "FT: privacy mismatch, current (%d) roam (%d)\n",
vif->privacy, sme->privacy);
return -EINVAL;
}
if (sme->pbss) {
wil_err(wil, "FT: roam is not valid for PBSS\n");
return -EINVAL;
}
memset(&auth_cmd, 0, sizeof(auth_cmd));
auth_cmd.channel = sme->channel->hw_value - 1;
ether_addr_copy(auth_cmd.bssid, sme->bssid);
wil_info(wil, "FT: roaming\n");
set_bit(wil_vif_ft_roam, vif->status);
rc = wmi_send(wil, WMI_FT_AUTH_CMDID, vif->mid,
&auth_cmd, sizeof(auth_cmd));
if (rc == 0)
mod_timer(&vif->connect_timer,
jiffies + msecs_to_jiffies(5000));
else
clear_bit(wil_vif_ft_roam, vif->status);
return rc;
}
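/* Convert a cfg80211 EDMG specification (BW configuration + channel bitmap)
 * into the WMI channel number expected by FW. Only one BW configuration is
 * supported; each bitmap presumably encodes the pair of 2.16 GHz channels
 * that EDMG channels 9-11 bond together.
 */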
static int wil_get_wmi_edmg_channel(struct wil6210_priv *wil, u8 edmg_bw_config,
u8 edmg_channels, u8 *wmi_ch)
{
if (!edmg_bw_config) {
*wmi_ch = 0;
return 0;
} else if (edmg_bw_config == WIL_EDMG_BW_CONFIGURATION) {
/* convert from edmg channel bitmap into edmg channel number */
switch (edmg_channels) {
case WIL_EDMG_CHANNEL_9_SUBCHANNELS:
return wil_spec2wmi_ch(9, wmi_ch);
case WIL_EDMG_CHANNEL_10_SUBCHANNELS:
return wil_spec2wmi_ch(10, wmi_ch);
case WIL_EDMG_CHANNEL_11_SUBCHANNELS:
return wil_spec2wmi_ch(11, wmi_ch);
default:
wil_err(wil, "Unsupported edmg channel bitmap 0x%x\n",
edmg_channels);
return -EINVAL;
}
} else {
wil_err(wil, "Unsupported EDMG BW configuration %d\n",
edmg_bw_config);
return -EINVAL;
}
}
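/* Connect to a BSS. FT roaming is detected from the auth type and handed to
 * wil_ft_connect(); otherwise the BSS is looked up in the cfg80211 cache,
 * stale keys are removed for secure connections and WMI_CONNECT_CMDID is
 * sent, with connect_timer guarding against a connection that never
 * completes.
 */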
static int wil_cfg80211_connect(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wil6210_vif *vif = ndev_to_vif(ndev);
struct cfg80211_bss *bss;
struct wmi_connect_cmd conn;
const u8 *ssid_eid;
const u8 *rsn_eid;
int ch;
int rc = 0;
bool is_ft_roam = false;
u8 network_type;
enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS;
wil_dbg_misc(wil, "connect, mid=%d\n", vif->mid);
wil_print_connect_params(wil, sme);
if (sme->auth_type == NL80211_AUTHTYPE_FT)
is_ft_roam = true;
if (sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC &&
test_bit(wil_vif_fwconnected, vif->status))
is_ft_roam = true;
if (!is_ft_roam)
if (test_bit(wil_vif_fwconnecting, vif->status) ||
test_bit(wil_vif_fwconnected, vif->status))
return -EALREADY;
if (sme->ie_len > WMI_MAX_IE_LEN) {
wil_err(wil, "IE too large (%td bytes)\n", sme->ie_len);
return -ERANGE;
}
rsn_eid = sme->ie ?
cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
NULL;
if (sme->privacy && !rsn_eid) {
wil_info(wil, "WSC connection\n");
if (is_ft_roam) {
wil_err(wil, "No WSC with FT roam\n");
return -EINVAL;
}
}
if (sme->pbss)
bss_type = IEEE80211_BSS_TYPE_PBSS;
bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
sme->ssid, sme->ssid_len,
bss_type, IEEE80211_PRIVACY_ANY);
if (!bss) {
wil_err(wil, "Unable to find BSS\n");
return -ENOENT;
}
ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
if (!ssid_eid) {
wil_err(wil, "No SSID\n");
rc = -ENOENT;
goto out;
}
vif->privacy = sme->privacy;
vif->pbss = sme->pbss;
rc = wmi_set_ie(vif, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
if (rc)
goto out;
switch (bss->capability & WLAN_CAPABILITY_DMG_TYPE_MASK) {
case WLAN_CAPABILITY_DMG_TYPE_AP:
network_type = WMI_NETTYPE_INFRA;
break;
case WLAN_CAPABILITY_DMG_TYPE_PBSS:
network_type = WMI_NETTYPE_P2P;
break;
default:
wil_err(wil, "Unsupported BSS type, capability= 0x%04x\n",
bss->capability);
rc = -EINVAL;
goto out;
}
ch = bss->channel->hw_value;
if (ch == 0) {
wil_err(wil, "BSS at unknown frequency %dMhz\n",
bss->channel->center_freq);
rc = -EOPNOTSUPP;
goto out;
}
if (is_ft_roam) {
if (network_type != WMI_NETTYPE_INFRA) {
wil_err(wil, "FT: Unsupported BSS type, capability= 0x%04x\n",
bss->capability);
rc = -EINVAL;
goto out;
}
rc = wil_ft_connect(wiphy, ndev, sme);
if (rc == 0)
vif->bss = bss;
goto out;
}
if (vif->privacy) {
/* For secure assoc, remove old keys */
rc = wmi_del_cipher_key(vif, 0, bss->bssid,
WMI_KEY_USE_PAIRWISE);
if (rc) {
wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(PTK) failed\n");
goto out;
}
rc = wmi_del_cipher_key(vif, 0, bss->bssid,
WMI_KEY_USE_RX_GROUP);
if (rc) {
wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(GTK) failed\n");
goto out;
}
}
/* WMI_CONNECT_CMD */
memset(&conn, 0, sizeof(conn));
conn.network_type = network_type;
if (vif->privacy) {
if (rsn_eid) { /* regular secure connection */
conn.dot11_auth_mode = WMI_AUTH11_SHARED;
conn.auth_mode = WMI_AUTH_WPA2_PSK;
conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
conn.pairwise_crypto_len = 16;
conn.group_crypto_type = WMI_CRYPT_AES_GCMP;
conn.group_crypto_len = 16;
} else { /* WSC */
conn.dot11_auth_mode = WMI_AUTH11_WSC;
conn.auth_mode = WMI_AUTH_NONE;
}
} else { /* insecure connection */
conn.dot11_auth_mode = WMI_AUTH11_OPEN;
conn.auth_mode = WMI_AUTH_NONE;
}
conn.ssid_len = min_t(u8, ssid_eid[1], 32);
memcpy(conn.ssid, ssid_eid+2, conn.ssid_len);
conn.channel = ch - 1;
rc = wil_get_wmi_edmg_channel(wil, sme->edmg.bw_config,
sme->edmg.channels, &conn.edmg_channel);
if (rc < 0)
goto out; /* release the bss reference taken by cfg80211_get_bss() */
ether_addr_copy(conn.bssid, bss->bssid);
ether_addr_copy(conn.dst_mac, bss->bssid);
set_bit(wil_vif_fwconnecting, vif->status);
rc = wmi_send(wil, WMI_CONNECT_CMDID, vif->mid, &conn, sizeof(conn));
if (rc == 0) {
netif_carrier_on(ndev);
if (!wil_has_other_active_ifaces(wil, ndev, false, true))
wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS);
vif->bss = bss;
/* Connect can take lots of time */
mod_timer(&vif->connect_timer,
jiffies + msecs_to_jiffies(5000));
} else {
clear_bit(wil_vif_fwconnecting, vif->status);
}
out:
cfg80211_put_bss(wiphy, bss);
return rc;
}
static int wil_cfg80211_disconnect(struct wiphy *wiphy,
struct net_device *ndev,
u16 reason_code)
{
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wil6210_vif *vif = ndev_to_vif(ndev);
wil_dbg_misc(wil, "disconnect: reason=%d, mid=%d\n",
reason_code, vif->mid);
if (!(test_bit(wil_vif_fwconnecting, vif->status) ||
test_bit(wil_vif_fwconnected, vif->status))) {
wil_err(wil, "Disconnect was called while disconnected\n");
return 0;
}
vif->locally_generated_disc = true;
rc = wmi_call(wil, WMI_DISCONNECT_CMDID, vif->mid, NULL, 0,
WMI_DISCONNECT_EVENTID, NULL, 0,
WIL6210_DISCONNECT_TO_MS);
if (rc)
wil_err(wil, "disconnect error %d\n", rc);
return rc;
}
static int wil_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
/* these parameters are explicitly not supported */
if (changed & (WIPHY_PARAM_RETRY_LONG |
WIPHY_PARAM_FRAG_THRESHOLD |
WIPHY_PARAM_RTS_THRESHOLD))
return -EOPNOTSUPP;
if (changed & WIPHY_PARAM_RETRY_SHORT) {
rc = wmi_set_mgmt_retry(wil, wiphy->retry_short);
if (rc)
return rc;
}
return 0;
}
int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct cfg80211_mgmt_tx_params *params,
u64 *cookie)
{
const u8 *buf = params->buf;
size_t len = params->len;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
int rc;
bool tx_status;
wil_dbg_misc(wil, "mgmt_tx: channel %d offchan %d, wait %d\n",
params->chan ? params->chan->hw_value : -1,
params->offchan,
params->wait);
/* Note: currently the "wait" parameter is supported only in AP mode.
 * In other modes, user-space must call remain_on_channel before mgmt_tx
 * to transmit or listen on a channel other than the active one.
 */
if (params->chan && params->chan->hw_value == 0) {
wil_err(wil, "invalid channel\n");
return -EINVAL;
}
if (wdev->iftype != NL80211_IFTYPE_AP) {
wil_dbg_misc(wil,
"send WMI_SW_TX_REQ_CMDID on non-AP interfaces\n");
rc = wmi_mgmt_tx(vif, buf, len);
goto out;
}
if (!params->chan || params->chan->hw_value == vif->channel) {
wil_dbg_misc(wil,
"send WMI_SW_TX_REQ_CMDID for on-channel\n");
rc = wmi_mgmt_tx(vif, buf, len);
goto out;
}
if (params->offchan == 0) {
wil_err(wil,
"invalid channel params: current %d requested %d, off-channel not allowed\n",
vif->channel, params->chan->hw_value);
return -EBUSY;
}
/* use wmi_mgmt_tx_ext only in AP mode for off-channel transmission */
rc = wmi_mgmt_tx_ext(vif, buf, len, params->chan->hw_value,
params->wait);
out:
/* when the sent packet was not acked by the receiver (ACK=0), rc is
 * -EAGAIN. In this case the function must still return success; the
 * missing ACK is reflected in tx_status instead.
 */
tx_status = (rc == 0);
rc = (rc == -EAGAIN) ? 0 : rc;
cfg80211_mgmt_tx_status(wdev, cookie ? *cookie : 0, buf, len,
tx_status, GFP_KERNEL);
return rc;
}
static int wil_cfg80211_set_channel(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil->monitor_chandef = *chandef;
return 0;
}
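/* Map cfg80211 key attributes onto WMI key usage: pairwise keys are PTKs,
 * while group keys are Rx GTKs on client interfaces and Tx GTKs on AP/GO.
 */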
static enum wmi_key_usage wil_detect_key_usage(struct wireless_dev *wdev,
bool pairwise)
{
struct wil6210_priv *wil = wdev_to_wil(wdev);
enum wmi_key_usage rc;
if (pairwise) {
rc = WMI_KEY_USE_PAIRWISE;
} else {
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
rc = WMI_KEY_USE_RX_GROUP;
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
rc = WMI_KEY_USE_TX_GROUP;
break;
default:
/* TODO: Rx GTK or Tx GTK? */
wil_err(wil, "Can't determine GTK type\n");
rc = WMI_KEY_USE_RX_GROUP;
break;
}
}
wil_dbg_misc(wil, "detect_key_usage: -> %s\n", key_usage_str[rc]);
return rc;
}
static struct wil_sta_info *
wil_find_sta_by_key_usage(struct wil6210_priv *wil, u8 mid,
enum wmi_key_usage key_usage, const u8 *mac_addr)
{
int cid = -EINVAL;
if (key_usage == WMI_KEY_USE_TX_GROUP)
return NULL; /* not needed */
/* supplicant provides Rx group key in STA mode with NULL MAC address */
if (mac_addr)
cid = wil_find_cid(wil, mid, mac_addr);
else if (key_usage == WMI_KEY_USE_RX_GROUP)
cid = wil_find_cid_by_idx(wil, mid, 0);
if (cid < 0) {
wil_err(wil, "No CID for %pM %s\n", mac_addr,
key_usage_str[key_usage]);
return ERR_PTR(cid);
}
return &wil->sta[cid];
}
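/* Store the initial Rx PN and mark the key slot valid so the Rx path can
 * perform replay checks: per-TID slots for pairwise keys, a single group
 * slot for the Rx GTK.
 */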
void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage,
struct wil_sta_info *cs,
struct key_params *params)
{
struct wil_tid_crypto_rx_single *cc;
int tid;
if (!cs)
return;
switch (key_usage) {
case WMI_KEY_USE_STORE_PTK:
case WMI_KEY_USE_PAIRWISE:
for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
cc = &cs->tid_crypto_rx[tid].key_id[key_index];
if (params->seq)
memcpy(cc->pn, params->seq,
IEEE80211_GCMP_PN_LEN);
else
memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
cc->key_set = true;
}
break;
case WMI_KEY_USE_RX_GROUP:
cc = &cs->group_crypto_rx.key_id[key_index];
if (params->seq)
memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN);
else
memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
cc->key_set = true;
break;
default:
break;
}
}
static void wil_del_rx_key(u8 key_index, enum wmi_key_usage key_usage,
struct wil_sta_info *cs)
{
struct wil_tid_crypto_rx_single *cc;
int tid;
if (!cs)
return;
switch (key_usage) {
case WMI_KEY_USE_PAIRWISE:
for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
cc = &cs->tid_crypto_rx[tid].key_id[key_index];
cc->key_set = false;
}
break;
case WMI_KEY_USE_RX_GROUP:
cc = &cs->group_crypto_rx.key_id[key_index];
cc->key_set = false;
break;
default:
break;
}
}
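/* Install a key in FW. During FT roam the station entry may not exist yet,
 * so the "not connected" check is relaxed. When a PTK rekey handshake is in
 * flight (M3 received / waiting for M4 to be sent), the key is sent with
 * WMI_KEY_USE_STORE_PTK so that, presumably, FW applies it only after M4
 * goes out and traffic stays decryptable meanwhile.
 */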
static int wil_cfg80211_add_key(struct wiphy *wiphy,
struct net_device *ndev, int link_id,
u8 key_index, bool pairwise,
const u8 *mac_addr,
struct key_params *params)
{
int rc;
struct wil6210_vif *vif = ndev_to_vif(ndev);
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wireless_dev *wdev = vif_to_wdev(vif);
enum wmi_key_usage key_usage = wil_detect_key_usage(wdev, pairwise);
struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, vif->mid,
key_usage,
mac_addr);
if (!params) {
wil_err(wil, "NULL params\n");
return -EINVAL;
}
wil_dbg_misc(wil, "add_key: %pM %s[%d] PN %*phN\n",
mac_addr, key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
if (IS_ERR(cs)) {
/* in FT, sta info may not be available as add_key may be
* sent by host before FW sends WMI_CONNECT_EVENT
*/
if (!test_bit(wil_vif_ft_roam, vif->status)) {
wil_err(wil, "Not connected, %pM %s[%d] PN %*phN\n",
mac_addr, key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
return -EINVAL;
}
} else {
wil_del_rx_key(key_index, key_usage, cs);
}
if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
wil_err(wil,
"Wrong PN len %d, %pM %s[%d] PN %*phN\n",
params->seq_len, mac_addr,
key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
return -EINVAL;
}
spin_lock_bh(&wil->eap_lock);
if (pairwise && wdev->iftype == NL80211_IFTYPE_STATION &&
(vif->ptk_rekey_state == WIL_REKEY_M3_RECEIVED ||
vif->ptk_rekey_state == WIL_REKEY_WAIT_M4_SENT)) {
key_usage = WMI_KEY_USE_STORE_PTK;
vif->ptk_rekey_state = WIL_REKEY_WAIT_M4_SENT;
wil_dbg_misc(wil, "Store EAPOL key\n");
}
spin_unlock_bh(&wil->eap_lock);
rc = wmi_add_cipher_key(vif, key_index, mac_addr, params->key_len,
params->key, key_usage);
if (!rc && !IS_ERR(cs)) {
/* update local storage used for AP recovery */
if (key_usage == WMI_KEY_USE_TX_GROUP && params->key &&
params->key_len <= WMI_MAX_KEY_LEN) {
vif->gtk_index = key_index;
memcpy(vif->gtk, params->key, params->key_len);
vif->gtk_len = params->key_len;
}
/* in FT, setting the Rx crypto context takes place upon receiving
 * the WMI_RING_EN_EVENTID event
 */
wil_set_crypto_rx(key_index, key_usage, cs, params);
}
return rc;
}
static int wil_cfg80211_del_key(struct wiphy *wiphy,
struct net_device *ndev, int link_id,
u8 key_index, bool pairwise,
const u8 *mac_addr)
{
struct wil6210_vif *vif = ndev_to_vif(ndev);
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wireless_dev *wdev = vif_to_wdev(vif);
enum wmi_key_usage key_usage = wil_detect_key_usage(wdev, pairwise);
struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, vif->mid,
key_usage,
mac_addr);
wil_dbg_misc(wil, "del_key: %pM %s[%d]\n", mac_addr,
key_usage_str[key_usage], key_index);
if (IS_ERR(cs))
wil_info(wil, "Not connected, %pM %s[%d]\n",
mac_addr, key_usage_str[key_usage], key_index);
if (!IS_ERR_OR_NULL(cs))
wil_del_rx_key(key_index, key_usage, cs);
return wmi_del_cipher_key(vif, key_index, mac_addr, key_usage);
}
/* Must be present, or wiphy_new() will WARN */
static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
struct net_device *ndev, int link_id,
u8 key_index, bool unicast,
bool multicast)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil_dbg_misc(wil, "set_default_key: entered\n");
return 0;
}
static int wil_remain_on_channel(struct wiphy *wiphy,
struct wireless_dev *wdev,
struct ieee80211_channel *chan,
unsigned int duration,
u64 *cookie)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
wil_dbg_misc(wil,
"remain_on_channel: center_freq=%d, duration=%d iftype=%d\n",
chan->center_freq, duration, wdev->iftype);
rc = wil_p2p_listen(wil, wdev, duration, chan, cookie);
return rc;
}
static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
struct wireless_dev *wdev,
u64 cookie)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
wil_dbg_misc(wil, "cancel_remain_on_channel\n");
return wil_p2p_cancel_listen(vif, cookie);
}
/*
 * find a specific IE in a list of IEs.
 * return a pointer to the beginning of the IE in the list,
 * or NULL if not found
 */
static const u8 *_wil_cfg80211_find_ie(const u8 *ies, u16 ies_len, const u8 *ie,
u16 ie_len)
{
struct ieee80211_vendor_ie *vie;
u32 oui;
/* IE tag at offset 0, length at offset 1 */
if (ie_len < 2 || 2 + ie[1] > ie_len)
return NULL;
if (ie[0] != WLAN_EID_VENDOR_SPECIFIC)
return cfg80211_find_ie(ie[0], ies, ies_len);
/* make sure there is room for 3 bytes OUI + 1 byte OUI type */
if (ie[1] < 4)
return NULL;
vie = (struct ieee80211_vendor_ie *)ie;
oui = vie->oui[0] << 16 | vie->oui[1] << 8 | vie->oui[2];
return cfg80211_find_vendor_ie(oui, vie->oui_type, ies,
ies_len);
}
/*
 * merge the IEs of two lists into a single list.
 * do not include IEs from the second list that already exist in the
 * first list. add only vendor-specific IEs from the second list, to
 * keep the merged list sorted (the vendor-specific IE has the highest
 * tag number). the caller must free the allocated memory for the
 * merged IEs.
 */
static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len,
const u8 *ies2, u16 ies2_len,
u8 **merged_ies, u16 *merged_len)
{
u8 *buf, *dpos;
const u8 *spos;
if (!ies1)
ies1_len = 0;
if (!ies2)
ies2_len = 0;
if (ies1_len == 0 && ies2_len == 0) {
*merged_ies = NULL;
*merged_len = 0;
return 0;
}
buf = kmalloc(ies1_len + ies2_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (ies1)
memcpy(buf, ies1, ies1_len);
dpos = buf + ies1_len;
spos = ies2;
while (spos && (spos + 1 < ies2 + ies2_len)) {
/* IE tag at offset 0, length at offset 1 */
u16 ielen = 2 + spos[1];
if (spos + ielen > ies2 + ies2_len)
break;
if (spos[0] == WLAN_EID_VENDOR_SPECIFIC &&
(!ies1 || !_wil_cfg80211_find_ie(ies1, ies1_len,
spos, ielen))) {
memcpy(dpos, spos, ielen);
dpos += ielen;
}
spos += ielen;
}
*merged_ies = buf;
*merged_len = dpos - buf;
return 0;
}
static void wil_print_bcon_data(struct cfg80211_beacon_data *b)
{
wil_hex_dump_misc("head ", DUMP_PREFIX_OFFSET, 16, 1,
b->head, b->head_len, true);
wil_hex_dump_misc("tail ", DUMP_PREFIX_OFFSET, 16, 1,
b->tail, b->tail_len, true);
wil_hex_dump_misc("BCON IE ", DUMP_PREFIX_OFFSET, 16, 1,
b->beacon_ies, b->beacon_ies_len, true);
wil_hex_dump_misc("PROBE ", DUMP_PREFIX_OFFSET, 16, 1,
b->probe_resp, b->probe_resp_len, true);
wil_hex_dump_misc("PROBE IE ", DUMP_PREFIX_OFFSET, 16, 1,
b->proberesp_ies, b->proberesp_ies_len, true);
wil_hex_dump_misc("ASSOC IE ", DUMP_PREFIX_OFFSET, 16, 1,
b->assocresp_ies, b->assocresp_ies_len, true);
}
/* internal functions for device reset and starting AP */
static u8 *
_wil_cfg80211_get_proberesp_ies(const u8 *proberesp, u16 proberesp_len,
u16 *ies_len)
{
u8 *ies = NULL;
if (proberesp) {
struct ieee80211_mgmt *f =
(struct ieee80211_mgmt *)proberesp;
size_t hlen = offsetof(struct ieee80211_mgmt,
u.probe_resp.variable);
ies = f->u.probe_resp.variable;
if (ies_len)
*ies_len = proberesp_len - hlen;
}
return ies;
}
static int _wil_cfg80211_set_ies(struct wil6210_vif *vif,
struct cfg80211_beacon_data *bcon)
{
int rc;
u16 len = 0, proberesp_len = 0;
u8 *ies = NULL, *proberesp;
/* update local storage used for AP recovery */
wil_memdup_ie(&vif->proberesp, &vif->proberesp_len, bcon->probe_resp,
bcon->probe_resp_len);
wil_memdup_ie(&vif->proberesp_ies, &vif->proberesp_ies_len,
bcon->proberesp_ies, bcon->proberesp_ies_len);
wil_memdup_ie(&vif->assocresp_ies, &vif->assocresp_ies_len,
bcon->assocresp_ies, bcon->assocresp_ies_len);
proberesp = _wil_cfg80211_get_proberesp_ies(bcon->probe_resp,
bcon->probe_resp_len,
&proberesp_len);
rc = _wil_cfg80211_merge_extra_ies(proberesp,
proberesp_len,
bcon->proberesp_ies,
bcon->proberesp_ies_len,
&ies, &len);
if (rc)
goto out;
rc = wmi_set_ie(vif, WMI_FRAME_PROBE_RESP, len, ies);
if (rc)
goto out;
if (bcon->assocresp_ies)
rc = wmi_set_ie(vif, WMI_FRAME_ASSOC_RESP,
bcon->assocresp_ies_len, bcon->assocresp_ies);
else
rc = wmi_set_ie(vif, WMI_FRAME_ASSOC_RESP, len, ies);
#if 0 /* to use beacon IEs, remove this #if 0 */
if (rc)
goto out;
rc = wmi_set_ie(vif, WMI_FRAME_BEACON,
bcon->tail_len, bcon->tail);
#endif
out:
kfree(ies);
return rc;
}
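/* Common AP bring-up used by start_ap, change_beacon and AP recovery: reset
 * the FW if this is the only active interface, program the SSID and IEs,
 * save the AP parameters in the VIF for later recovery, then start the PCP
 * and the broadcast ring. FT support is enabled when the probe response IEs
 * carry a Mobility Domain element.
 */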
static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
struct net_device *ndev,
const u8 *ssid, size_t ssid_len, u32 privacy,
int bi, u8 chan, u8 wmi_edmg_channel,
struct cfg80211_beacon_data *bcon,
u8 hidden_ssid, u32 pbss)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wil6210_vif *vif = ndev_to_vif(ndev);
int rc;
struct wireless_dev *wdev = ndev->ieee80211_ptr;
u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
u8 is_go = (wdev->iftype == NL80211_IFTYPE_P2P_GO);
u16 proberesp_len = 0;
u8 *proberesp;
bool ft = false;
if (pbss)
wmi_nettype = WMI_NETTYPE_P2P;
wil_dbg_misc(wil, "start_ap: mid=%d, is_go=%d\n", vif->mid, is_go);
if (is_go && !pbss) {
wil_err(wil, "P2P GO must be in PBSS\n");
return -EOPNOTSUPP;
}
wil_set_recovery_state(wil, fw_recovery_idle);
proberesp = _wil_cfg80211_get_proberesp_ies(bcon->probe_resp,
bcon->probe_resp_len,
&proberesp_len);
/* check whether the probe response IEs contain an MDE (Mobility Domain element) */
if ((proberesp && proberesp_len > 0 &&
cfg80211_find_ie(WLAN_EID_MOBILITY_DOMAIN,
proberesp,
proberesp_len)))
ft = true;
if (ft) {
if (!test_bit(WMI_FW_CAPABILITY_FT_ROAMING,
wil->fw_capabilities)) {
wil_err(wil, "FW does not support FT roaming\n");
return -EOPNOTSUPP;
}
set_bit(wil_vif_ft_roam, vif->status);
}
mutex_lock(&wil->mutex);
if (!wil_has_other_active_ifaces(wil, ndev, true, false)) {
__wil_down(wil);
rc = __wil_up(wil);
if (rc)
goto out;
}
rc = wmi_set_ssid(vif, ssid_len, ssid);
if (rc)
goto out;
rc = _wil_cfg80211_set_ies(vif, bcon);
if (rc)
goto out;
vif->privacy = privacy;
vif->channel = chan;
vif->wmi_edmg_channel = wmi_edmg_channel;
vif->hidden_ssid = hidden_ssid;
vif->pbss = pbss;
vif->bi = bi;
memcpy(vif->ssid, ssid, ssid_len);
vif->ssid_len = ssid_len;
netif_carrier_on(ndev);
if (!wil_has_other_active_ifaces(wil, ndev, false, true))
wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS);
rc = wmi_pcp_start(vif, bi, wmi_nettype, chan, wmi_edmg_channel,
hidden_ssid, is_go);
if (rc)
goto err_pcp_start;
rc = wil_bcast_init(vif);
if (rc)
goto err_bcast;
goto out; /* success */
err_bcast:
wmi_pcp_stop(vif);
err_pcp_start:
netif_carrier_off(ndev);
if (!wil_has_other_active_ifaces(wil, ndev, false, true))
wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
out:
mutex_unlock(&wil->mutex);
return rc;
}
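/* After a FW recovery, restart every VIF that was operating as an AP using
 * the parameters saved in the VIF (SSID, beacon interval, channel, IEs and,
 * for secure APs, the stored GTK).
 */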
void wil_cfg80211_ap_recovery(struct wil6210_priv *wil)
{
int rc, i;
struct wiphy *wiphy = wil_to_wiphy(wil);
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
struct wil6210_vif *vif = wil->vifs[i];
struct net_device *ndev;
struct cfg80211_beacon_data bcon = {};
struct key_params key_params = {};
if (!vif || vif->ssid_len == 0)
continue;
ndev = vif_to_ndev(vif);
bcon.proberesp_ies = vif->proberesp_ies;
bcon.assocresp_ies = vif->assocresp_ies;
bcon.probe_resp = vif->proberesp;
bcon.proberesp_ies_len = vif->proberesp_ies_len;
bcon.assocresp_ies_len = vif->assocresp_ies_len;
bcon.probe_resp_len = vif->proberesp_len;
wil_info(wil,
"AP (vif %d) recovery: privacy %d, bi %d, channel %d, hidden %d, pbss %d\n",
i, vif->privacy, vif->bi, vif->channel,
vif->hidden_ssid, vif->pbss);
wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
vif->ssid, vif->ssid_len, true);
rc = _wil_cfg80211_start_ap(wiphy, ndev,
vif->ssid, vif->ssid_len,
vif->privacy, vif->bi,
vif->channel,
vif->wmi_edmg_channel, &bcon,
vif->hidden_ssid, vif->pbss);
if (rc) {
wil_err(wil, "vif %d recovery failed (%d)\n", i, rc);
continue;
}
if (!vif->privacy || vif->gtk_len == 0)
continue;
key_params.key = vif->gtk;
key_params.key_len = vif->gtk_len;
key_params.seq_len = IEEE80211_GCMP_PN_LEN;
rc = wil_cfg80211_add_key(wiphy, ndev, -1, vif->gtk_index,
false, NULL, &key_params);
if (rc)
wil_err(wil, "vif %d recovery add key failed (%d)\n",
i, rc);
}
}
static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_beacon_data *bcon)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wireless_dev *wdev = ndev->ieee80211_ptr;
struct wil6210_vif *vif = ndev_to_vif(ndev);
int rc;
u32 privacy = 0;
wil_dbg_misc(wil, "change_beacon, mid=%d\n", vif->mid);
wil_print_bcon_data(bcon);
if (bcon->tail &&
cfg80211_find_ie(WLAN_EID_RSN, bcon->tail,
bcon->tail_len))
privacy = 1;
memcpy(vif->ssid, wdev->u.ap.ssid, wdev->u.ap.ssid_len);
vif->ssid_len = wdev->u.ap.ssid_len;
/* if the privacy setting has changed, the AP must be restarted */
if (vif->privacy != privacy) {
wil_dbg_misc(wil, "privacy changed %d=>%d. Restarting AP\n",
vif->privacy, privacy);
rc = _wil_cfg80211_start_ap(wiphy, ndev, vif->ssid,
vif->ssid_len, privacy,
wdev->links[0].ap.beacon_interval,
vif->channel,
vif->wmi_edmg_channel, bcon,
vif->hidden_ssid,
vif->pbss);
} else {
rc = _wil_cfg80211_set_ies(vif, bcon);
}
return rc;
}
static int wil_cfg80211_start_ap(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_ap_settings *info)
{
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct ieee80211_channel *channel = info->chandef.chan;
struct cfg80211_beacon_data *bcon = &info->beacon;
struct cfg80211_crypto_settings *crypto = &info->crypto;
u8 wmi_edmg_channel;
u8 hidden_ssid;
wil_dbg_misc(wil, "start_ap\n");
rc = wil_get_wmi_edmg_channel(wil, info->chandef.edmg.bw_config,
info->chandef.edmg.channels,
&wmi_edmg_channel);
if (rc < 0)
return rc;
if (!channel) {
wil_err(wil, "AP: No channel???\n");
return -EINVAL;
}
switch (info->hidden_ssid) {
case NL80211_HIDDEN_SSID_NOT_IN_USE:
hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
break;
case NL80211_HIDDEN_SSID_ZERO_LEN:
hidden_ssid = WMI_HIDDEN_SSID_SEND_EMPTY;
break;
case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
hidden_ssid = WMI_HIDDEN_SSID_CLEAR;
break;
default:
wil_err(wil, "AP: Invalid hidden SSID %d\n", info->hidden_ssid);
return -EOPNOTSUPP;
}
wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
channel->center_freq, info->privacy ? "secure" : "open");
wil_dbg_misc(wil, "Privacy: %d auth_type %d\n",
info->privacy, info->auth_type);
wil_dbg_misc(wil, "Hidden SSID mode: %d\n",
info->hidden_ssid);
wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval,
info->dtim_period);
wil_dbg_misc(wil, "PBSS %d\n", info->pbss);
wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
info->ssid, info->ssid_len, true);
wil_print_bcon_data(bcon);
wil_print_crypto(wil, crypto);
rc = _wil_cfg80211_start_ap(wiphy, ndev,
info->ssid, info->ssid_len, info->privacy,
info->beacon_interval, channel->hw_value,
wmi_edmg_channel, bcon, hidden_ssid,
info->pbss);
return rc;
}
static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
struct net_device *ndev,
unsigned int link_id)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wil6210_vif *vif = ndev_to_vif(ndev);
bool last;
wil_dbg_misc(wil, "stop_ap, mid=%d\n", vif->mid);
netif_carrier_off(ndev);
last = !wil_has_other_active_ifaces(wil, ndev, false, true);
if (last) {
wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
wil_set_recovery_state(wil, fw_recovery_idle);
set_bit(wil_status_resetting, wil->status);
}
mutex_lock(&wil->mutex);
wmi_pcp_stop(vif);
clear_bit(wil_vif_ft_roam, vif->status);
vif->ssid_len = 0;
wil_memdup_ie(&vif->proberesp, &vif->proberesp_len, NULL, 0);
wil_memdup_ie(&vif->proberesp_ies, &vif->proberesp_ies_len, NULL, 0);
wil_memdup_ie(&vif->assocresp_ies, &vif->assocresp_ies_len, NULL, 0);
memset(vif->gtk, 0, WMI_MAX_KEY_LEN);
vif->gtk_len = 0;
if (last)
__wil_down(wil);
else
wil_bcast_fini(vif);
mutex_unlock(&wil->mutex);
return 0;
}
static int wil_cfg80211_add_station(struct wiphy *wiphy,
struct net_device *dev,
const u8 *mac,
struct station_parameters *params)
{
struct wil6210_vif *vif = ndev_to_vif(dev);
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil_dbg_misc(wil, "add station %pM aid %d mid %d mask 0x%x set 0x%x\n",
mac, params->aid, vif->mid,
params->sta_flags_mask, params->sta_flags_set);
if (!disable_ap_sme) {
wil_err(wil, "not supported with AP SME enabled\n");
return -EOPNOTSUPP;
}
if (params->aid > WIL_MAX_DMG_AID) {
wil_err(wil, "invalid aid\n");
return -EINVAL;
}
return wmi_new_sta(vif, mac, params->aid);
}
static int wil_cfg80211_del_station(struct wiphy *wiphy,
struct net_device *dev,
struct station_del_parameters *params)
{
struct wil6210_vif *vif = ndev_to_vif(dev);
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil_dbg_misc(wil, "del_station: %pM, reason=%d mid=%d\n",
params->mac, params->reason_code, vif->mid);
mutex_lock(&wil->mutex);
wil6210_disconnect(vif, params->mac, params->reason_code);
mutex_unlock(&wil->mutex);
return 0;
}
static int wil_cfg80211_change_station(struct wiphy *wiphy,
struct net_device *dev,
const u8 *mac,
struct station_parameters *params)
{
struct wil6210_vif *vif = ndev_to_vif(dev);
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int authorize;
int cid, i;
struct wil_ring_tx_data *txdata = NULL;
wil_dbg_misc(wil, "change station %pM mask 0x%x set 0x%x mid %d\n",
mac, params->sta_flags_mask, params->sta_flags_set,
vif->mid);
if (!disable_ap_sme) {
wil_dbg_misc(wil, "not supported with AP SME enabled\n");
return -EOPNOTSUPP;
}
if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)))
return 0;
cid = wil_find_cid(wil, vif->mid, mac);
if (cid < 0) {
wil_err(wil, "station not found\n");
return -ENOLINK;
}
for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++)
if (wil->ring2cid_tid[i][0] == cid) {
txdata = &wil->ring_tx_data[i];
break;
}
if (!txdata) {
wil_err(wil, "ring data not found\n");
return -ENOLINK;
}
authorize = params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED);
txdata->dot1x_open = authorize ? 1 : 0;
wil_dbg_misc(wil, "cid %d ring %d authorize %d\n", cid, i,
txdata->dot1x_open);
return 0;
}
/* probe_client handling */
static void wil_probe_client_handle(struct wil6210_priv *wil,
struct wil6210_vif *vif,
struct wil_probe_client_req *req)
{
struct net_device *ndev = vif_to_ndev(vif);
struct wil_sta_info *sta = &wil->sta[req->cid];
/* assume STA is alive if it is still connected,
* else FW will disconnect it
*/
bool alive = (sta->status == wil_sta_connected);
cfg80211_probe_status(ndev, sta->addr, req->cookie, alive,
0, false, GFP_KERNEL);
}
static struct list_head *next_probe_client(struct wil6210_vif *vif)
{
struct list_head *ret = NULL;
mutex_lock(&vif->probe_client_mutex);
if (!list_empty(&vif->probe_client_pending)) {
ret = vif->probe_client_pending.next;
list_del(ret);
}
mutex_unlock(&vif->probe_client_mutex);
return ret;
}
void wil_probe_client_worker(struct work_struct *work)
{
struct wil6210_vif *vif = container_of(work, struct wil6210_vif,
probe_client_worker);
struct wil6210_priv *wil = vif_to_wil(vif);
struct wil_probe_client_req *req;
struct list_head *lh;
while ((lh = next_probe_client(vif)) != NULL) {
req = list_entry(lh, struct wil_probe_client_req, list);
wil_probe_client_handle(wil, vif, req);
kfree(req);
}
}
void wil_probe_client_flush(struct wil6210_vif *vif)
{
struct wil_probe_client_req *req, *t;
struct wil6210_priv *wil = vif_to_wil(vif);
wil_dbg_misc(wil, "probe_client_flush\n");
mutex_lock(&vif->probe_client_mutex);
list_for_each_entry_safe(req, t, &vif->probe_client_pending, list) {
list_del(&req->list);
kfree(req);
}
mutex_unlock(&vif->probe_client_mutex);
}
static int wil_cfg80211_probe_client(struct wiphy *wiphy,
struct net_device *dev,
const u8 *peer, u64 *cookie)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wil6210_vif *vif = ndev_to_vif(dev);
struct wil_probe_client_req *req;
int cid = wil_find_cid(wil, vif->mid, peer);
wil_dbg_misc(wil, "probe_client: %pM => CID %d MID %d\n",
peer, cid, vif->mid);
if (cid < 0)
return -ENOLINK;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
req->cid = cid;
req->cookie = cid;
mutex_lock(&vif->probe_client_mutex);
list_add_tail(&req->list, &vif->probe_client_pending);
mutex_unlock(&vif->probe_client_mutex);
*cookie = req->cookie;
queue_work(wil->wq_service, &vif->probe_client_worker);
return 0;
}
static int wil_cfg80211_change_bss(struct wiphy *wiphy,
struct net_device *dev,
struct bss_parameters *params)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wil6210_vif *vif = ndev_to_vif(dev);
if (params->ap_isolate >= 0) {
wil_dbg_misc(wil, "change_bss: ap_isolate MID %d, %d => %d\n",
vif->mid, vif->ap_isolate, params->ap_isolate);
vif->ap_isolate = params->ap_isolate;
}
return 0;
}
static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy,
struct net_device *dev,
bool enabled, int timeout)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_ps_profile_type ps_profile;
wil_dbg_misc(wil, "enabled=%d, timeout=%d\n",
enabled, timeout);
if (enabled)
ps_profile = WMI_PS_PROFILE_TYPE_DEFAULT;
else
ps_profile = WMI_PS_PROFILE_TYPE_PS_DISABLED;
return wil_ps_update(wil, ps_profile);
}
static int wil_cfg80211_suspend(struct wiphy *wiphy,
struct cfg80211_wowlan *wow)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
/* Setting the wakeup trigger based on wow is TBD */
if (test_bit(wil_status_suspended, wil->status)) {
wil_dbg_pm(wil, "trying to suspend while suspended\n");
return 0;
}
rc = wil_can_suspend(wil, false);
if (rc)
goto out;
wil_dbg_pm(wil, "suspending\n");
mutex_lock(&wil->mutex);
mutex_lock(&wil->vif_mutex);
wil_p2p_stop_radio_operations(wil);
wil_abort_scan_all_vifs(wil, true);
mutex_unlock(&wil->vif_mutex);
mutex_unlock(&wil->mutex);
out:
return rc;
}
static int wil_cfg80211_resume(struct wiphy *wiphy)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil_dbg_pm(wil, "resuming\n");
return 0;
}
static int
wil_cfg80211_sched_scan_start(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_sched_scan_request *request)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wil6210_vif *vif = ndev_to_vif(dev);
int i, rc;
if (vif->mid != 0)
return -EOPNOTSUPP;
wil_dbg_misc(wil,
"sched scan start: n_ssids %d, ie_len %zu, flags 0x%x\n",
request->n_ssids, request->ie_len, request->flags);
for (i = 0; i < request->n_ssids; i++) {
wil_dbg_misc(wil, "SSID[%d]:", i);
wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
request->ssids[i].ssid,
request->ssids[i].ssid_len, true);
}
wil_dbg_misc(wil, "channels:");
for (i = 0; i < request->n_channels; i++)
wil_dbg_misc(wil, " %d%s", request->channels[i]->hw_value,
i == request->n_channels - 1 ? "\n" : "");
wil_dbg_misc(wil, "n_match_sets %d, min_rssi_thold %d, delay %d\n",
request->n_match_sets, request->min_rssi_thold,
request->delay);
for (i = 0; i < request->n_match_sets; i++) {
struct cfg80211_match_set *ms = &request->match_sets[i];
wil_dbg_misc(wil, "MATCHSET[%d]: rssi_thold %d\n",
i, ms->rssi_thold);
wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
ms->ssid.ssid,
ms->ssid.ssid_len, true);
}
wil_dbg_misc(wil, "n_scan_plans %d\n", request->n_scan_plans);
for (i = 0; i < request->n_scan_plans; i++) {
struct cfg80211_sched_scan_plan *sp = &request->scan_plans[i];
wil_dbg_misc(wil, "SCAN PLAN[%d]: interval %d iterations %d\n",
i, sp->interval, sp->iterations);
}
rc = wmi_set_ie(vif, WMI_FRAME_PROBE_REQ,
request->ie_len, request->ie);
if (rc)
return rc;
return wmi_start_sched_scan(wil, request);
}
static int
wil_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev,
u64 reqid)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wil6210_vif *vif = ndev_to_vif(dev);
int rc;
if (vif->mid != 0)
return -EOPNOTSUPP;
rc = wmi_stop_sched_scan(wil);
/* the device returns an error if it thinks PNO is already stopped;
 * ignore the return code so user space and the driver get back in sync
 */
wil_dbg_misc(wil, "sched scan stopped (%d)\n", rc);
return 0;
}
static int
wil_cfg80211_update_ft_ies(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_update_ft_ies_params *ftie)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wil6210_vif *vif = ndev_to_vif(dev);
struct cfg80211_bss *bss;
struct wmi_ft_reassoc_cmd reassoc;
int rc = 0;
wil_dbg_misc(wil, "update ft ies, mid=%d\n", vif->mid);
wil_hex_dump_misc("FT IE ", DUMP_PREFIX_OFFSET, 16, 1,
ftie->ie, ftie->ie_len, true);
if (!test_bit(WMI_FW_CAPABILITY_FT_ROAMING, wil->fw_capabilities)) {
wil_err(wil, "FW does not support FT roaming\n");
return -EOPNOTSUPP;
}
rc = wmi_update_ft_ies(vif, ftie->ie_len, ftie->ie);
if (rc)
return rc;
if (!test_bit(wil_vif_ft_roam, vif->status))
/* vif is not roaming */
return 0;
/* wil_vif_ft_roam is set. wil_cfg80211_update_ft_ies is used as
* a trigger for reassoc
*/
bss = vif->bss;
if (!bss) {
wil_err(wil, "FT: bss is NULL\n");
return -EINVAL;
}
memset(&reassoc, 0, sizeof(reassoc));
ether_addr_copy(reassoc.bssid, bss->bssid);
rc = wmi_send(wil, WMI_FT_REASSOC_CMDID, vif->mid,
&reassoc, sizeof(reassoc));
if (rc)
wil_err(wil, "FT: reassoc failed (%d)\n", rc);
return rc;
}
static int wil_cfg80211_set_multicast_to_unicast(struct wiphy *wiphy,
struct net_device *dev,
const bool enabled)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
if (wil->multicast_to_unicast == enabled)
return 0;
wil_info(wil, "set multicast to unicast, enabled=%d\n", enabled);
wil->multicast_to_unicast = enabled;
return 0;
}
static int wil_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy,
struct net_device *dev,
s32 rssi_thold, u32 rssi_hyst)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
wil->cqm_rssi_thold = rssi_thold;
rc = wmi_set_cqm_rssi_config(wil, rssi_thold, rssi_hyst);
if (rc)
/* reset stored value upon failure */
wil->cqm_rssi_thold = 0;
return rc;
}
static const struct cfg80211_ops wil_cfg80211_ops = {
.add_virtual_intf = wil_cfg80211_add_iface,
.del_virtual_intf = wil_cfg80211_del_iface,
.scan = wil_cfg80211_scan,
.abort_scan = wil_cfg80211_abort_scan,
.connect = wil_cfg80211_connect,
.disconnect = wil_cfg80211_disconnect,
.set_wiphy_params = wil_cfg80211_set_wiphy_params,
.change_virtual_intf = wil_cfg80211_change_iface,
.get_station = wil_cfg80211_get_station,
.dump_station = wil_cfg80211_dump_station,
.remain_on_channel = wil_remain_on_channel,
.cancel_remain_on_channel = wil_cancel_remain_on_channel,
.mgmt_tx = wil_cfg80211_mgmt_tx,
.set_monitor_channel = wil_cfg80211_set_channel,
.add_key = wil_cfg80211_add_key,
.del_key = wil_cfg80211_del_key,
.set_default_key = wil_cfg80211_set_default_key,
/* AP mode */
.change_beacon = wil_cfg80211_change_beacon,
.start_ap = wil_cfg80211_start_ap,
.stop_ap = wil_cfg80211_stop_ap,
.add_station = wil_cfg80211_add_station,
.del_station = wil_cfg80211_del_station,
.change_station = wil_cfg80211_change_station,
.probe_client = wil_cfg80211_probe_client,
.change_bss = wil_cfg80211_change_bss,
/* P2P device */
.start_p2p_device = wil_cfg80211_start_p2p_device,
.stop_p2p_device = wil_cfg80211_stop_p2p_device,
.set_power_mgmt = wil_cfg80211_set_power_mgmt,
.set_cqm_rssi_config = wil_cfg80211_set_cqm_rssi_config,
.suspend = wil_cfg80211_suspend,
.resume = wil_cfg80211_resume,
.sched_scan_start = wil_cfg80211_sched_scan_start,
.sched_scan_stop = wil_cfg80211_sched_scan_stop,
.update_ft_ies = wil_cfg80211_update_ft_ies,
.set_multicast_to_unicast = wil_cfg80211_set_multicast_to_unicast,
};
static void wil_wiphy_init(struct wiphy *wiphy)
{
wiphy->max_scan_ssids = 1;
wiphy->max_scan_ie_len = WMI_MAX_IE_LEN;
wiphy->max_remain_on_channel_duration = WIL_MAX_ROC_DURATION_MS;
wiphy->max_num_pmkids = 0 /* TODO: */;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_P2P_DEVICE) |
BIT(NL80211_IFTYPE_MONITOR);
wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_PS_ON_BY_DEFAULT;
if (!disable_ap_sme)
wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME;
dev_dbg(wiphy_dev(wiphy), "%s : flags = 0x%08x\n",
__func__, wiphy->flags);
wiphy->probe_resp_offload =
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
wiphy->bands[NL80211_BAND_60GHZ] = &wil_band_60ghz;
/* may change after reading FW capabilities */
wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
wiphy->cipher_suites = wil_cipher_suites;
wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
wiphy->mgmt_stypes = wil_mgmt_stypes;
wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
wiphy->n_vendor_commands = ARRAY_SIZE(wil_nl80211_vendor_commands);
wiphy->vendor_commands = wil_nl80211_vendor_commands;
#ifdef CONFIG_PM
wiphy->wowlan = &wil_wowlan_support;
#endif
}
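/* Build wiphy interface combinations from the FW concurrency record. The
 * record is a packed, variable-length structure where each combo header is
 * immediately followed by its limits array:
 *
 *   combo[0] | limit ... limit | combo[1] | limit ... limit | ...
 *
 * hence the walk steps combo = (combo->limits + combo->n_limits). A single
 * allocation holds the ieee80211_iface_combination array followed by all
 * the ieee80211_iface_limit entries it points into.
 */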
int wil_cfg80211_iface_combinations_from_fw(
struct wil6210_priv *wil, const struct wil_fw_record_concurrency *conc)
{
struct wiphy *wiphy = wil_to_wiphy(wil);
u32 total_limits = 0;
u16 n_combos;
const struct wil_fw_concurrency_combo *combo;
const struct wil_fw_concurrency_limit *limit;
struct ieee80211_iface_combination *iface_combinations;
struct ieee80211_iface_limit *iface_limit;
int i, j;
if (wiphy->iface_combinations) {
wil_dbg_misc(wil, "iface_combinations already set, skipping\n");
return 0;
}
combo = conc->combos;
n_combos = le16_to_cpu(conc->n_combos);
for (i = 0; i < n_combos; i++) {
total_limits += combo->n_limits;
limit = combo->limits + combo->n_limits;
combo = (struct wil_fw_concurrency_combo *)limit;
}
iface_combinations =
kzalloc(n_combos * sizeof(struct ieee80211_iface_combination) +
total_limits * sizeof(struct ieee80211_iface_limit),
GFP_KERNEL);
if (!iface_combinations)
return -ENOMEM;
iface_limit = (struct ieee80211_iface_limit *)(iface_combinations +
n_combos);
combo = conc->combos;
for (i = 0; i < n_combos; i++) {
iface_combinations[i].max_interfaces = combo->max_interfaces;
iface_combinations[i].num_different_channels =
combo->n_diff_channels;
iface_combinations[i].beacon_int_infra_match =
combo->same_bi;
iface_combinations[i].n_limits = combo->n_limits;
wil_dbg_misc(wil,
"iface_combination %d: max_if %d, num_ch %d, bi_match %d\n",
i, iface_combinations[i].max_interfaces,
iface_combinations[i].num_different_channels,
iface_combinations[i].beacon_int_infra_match);
limit = combo->limits;
for (j = 0; j < combo->n_limits; j++) {
iface_limit[j].max = le16_to_cpu(limit[j].max);
iface_limit[j].types = le16_to_cpu(limit[j].types);
wil_dbg_misc(wil,
"limit %d: max %d types 0x%x\n", j,
iface_limit[j].max, iface_limit[j].types);
}
iface_combinations[i].limits = iface_limit;
iface_limit += combo->n_limits;
limit += combo->n_limits;
combo = (struct wil_fw_concurrency_combo *)limit;
}
wil_dbg_misc(wil, "multiple VIFs supported, n_mids %d\n", conc->n_mids);
wil->max_vifs = conc->n_mids + 1; /* including main interface */
if (wil->max_vifs > WIL_MAX_VIFS) {
wil_info(wil, "limited number of VIFs supported(%d, FW %d)\n",
WIL_MAX_VIFS, wil->max_vifs);
wil->max_vifs = WIL_MAX_VIFS;
}
wiphy->n_iface_combinations = n_combos;
wiphy->iface_combinations = iface_combinations;
return 0;
}
struct wil6210_priv *wil_cfg80211_init(struct device *dev)
{
struct wiphy *wiphy;
struct wil6210_priv *wil;
struct ieee80211_channel *ch;
dev_dbg(dev, "%s()\n", __func__);
/* Note: the wireless_dev structure is no longer allocated here.
* Instead, it is allocated as part of the net_device structure
* for main interface and each VIF.
*/
wiphy = wiphy_new(&wil_cfg80211_ops, sizeof(struct wil6210_priv));
if (!wiphy)
return ERR_PTR(-ENOMEM);
set_wiphy_dev(wiphy, dev);
wil_wiphy_init(wiphy);
wil = wiphy_to_wil(wiphy);
wil->wiphy = wiphy;
/* default monitor channel */
ch = wiphy->bands[NL80211_BAND_60GHZ]->channels;
cfg80211_chandef_create(&wil->monitor_chandef, ch, NL80211_CHAN_NO_HT);
return wil;
}
void wil_cfg80211_deinit(struct wil6210_priv *wil)
{
struct wiphy *wiphy = wil_to_wiphy(wil);
dev_dbg(wil_to_dev(wil), "%s()\n", __func__);
if (!wiphy)
return;
kfree(wiphy->iface_combinations);
wiphy->iface_combinations = NULL;
wiphy_free(wiphy);
/* do not access wil6210_priv after returning from here */
}
void wil_p2p_wdev_free(struct wil6210_priv *wil)
{
struct wireless_dev *p2p_wdev;
mutex_lock(&wil->vif_mutex);
p2p_wdev = wil->p2p_wdev;
wil->p2p_wdev = NULL;
wil->radio_wdev = wil->main_ndev->ieee80211_ptr;
mutex_unlock(&wil->vif_mutex);
if (p2p_wdev) {
cfg80211_unregister_wdev(p2p_wdev);
kfree(p2p_wdev);
}
}
static int wil_rf_sector_status_to_rc(u8 status)
{
switch (status) {
case WMI_RF_SECTOR_STATUS_SUCCESS:
return 0;
case WMI_RF_SECTOR_STATUS_BAD_PARAMETERS_ERROR:
return -EINVAL;
case WMI_RF_SECTOR_STATUS_BUSY_ERROR:
return -EAGAIN;
case WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR:
return -EOPNOTSUPP;
default:
return -EINVAL;
}
}
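/* QCA vendor command: read the RF sector parameters of the requested sector
 * for each RF module selected in the module mask. The result is returned as
 * a nested netlink attribute per module, together with the TSF (taken from
 * the event's tsf field) at which the configuration was sampled.
 */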
static int wil_rf_sector_get_cfg(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int data_len)
{
struct wil6210_priv *wil = wdev_to_wil(wdev);
struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
int rc;
struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
u16 sector_index;
u8 sector_type;
u32 rf_modules_vec;
struct wmi_get_rf_sector_params_cmd cmd;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_get_rf_sector_params_done_event evt;
} __packed reply = {
.evt = {.status = WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR},
};
struct sk_buff *msg;
struct nlattr *nl_cfgs, *nl_cfg;
u32 i;
struct wmi_rf_sector_info *si;
if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
return -EOPNOTSUPP;
rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data,
data_len, wil_rf_sector_policy, NULL);
if (rc) {
wil_err(wil, "Invalid rf sector ATTR\n");
return rc;
}
if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
!tb[QCA_ATTR_DMG_RF_SECTOR_TYPE] ||
!tb[QCA_ATTR_DMG_RF_MODULE_MASK]) {
wil_err(wil, "Invalid rf sector spec\n");
return -EINVAL;
}
sector_index = nla_get_u16(
tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
if (sector_index >= WIL_MAX_RF_SECTORS) {
wil_err(wil, "Invalid sector index %d\n", sector_index);
return -EINVAL;
}
sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
wil_err(wil, "Invalid sector type %d\n", sector_type);
return -EINVAL;
}
rf_modules_vec = nla_get_u32(
tb[QCA_ATTR_DMG_RF_MODULE_MASK]);
if (rf_modules_vec >= BIT(WMI_MAX_RF_MODULES_NUM)) {
wil_err(wil, "Invalid rf module mask 0x%x\n", rf_modules_vec);
return -EINVAL;
}
cmd.sector_idx = cpu_to_le16(sector_index);
cmd.sector_type = sector_type;
cmd.rf_modules_vec = rf_modules_vec & 0xFF;
rc = wmi_call(wil, WMI_GET_RF_SECTOR_PARAMS_CMDID, vif->mid,
&cmd, sizeof(cmd), WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID,
&reply, sizeof(reply),
500);
if (rc)
return rc;
if (reply.evt.status) {
wil_err(wil, "get rf sector cfg failed with status %d\n",
reply.evt.status);
return wil_rf_sector_status_to_rc(reply.evt.status);
}
msg = cfg80211_vendor_cmd_alloc_reply_skb(
wiphy, 64 * WMI_MAX_RF_MODULES_NUM);
if (!msg)
return -ENOMEM;
if (nla_put_u64_64bit(msg, QCA_ATTR_TSF,
le64_to_cpu(reply.evt.tsf),
QCA_ATTR_PAD))
goto nla_put_failure;
nl_cfgs = nla_nest_start_noflag(msg, QCA_ATTR_DMG_RF_SECTOR_CFG);
if (!nl_cfgs)
goto nla_put_failure;
for (i = 0; i < WMI_MAX_RF_MODULES_NUM; i++) {
if (!(rf_modules_vec & BIT(i)))
continue;
nl_cfg = nla_nest_start_noflag(msg, i);
if (!nl_cfg)
goto nla_put_failure;
si = &reply.evt.sectors_info[i];
if (nla_put_u8(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX,
i) ||
nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0,
le32_to_cpu(si->etype0)) ||
nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1,
le32_to_cpu(si->etype1)) ||
nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2,
le32_to_cpu(si->etype2)) ||
nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI,
le32_to_cpu(si->psh_hi)) ||
nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO,
le32_to_cpu(si->psh_lo)) ||
nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16,
le32_to_cpu(si->dtype_swch_off)))
goto nla_put_failure;
nla_nest_end(msg, nl_cfg);
}
nla_nest_end(msg, nl_cfgs);
rc = cfg80211_vendor_cmd_reply(msg);
return rc;
nla_put_failure:
kfree_skb(msg);
return -ENOBUFS;
}
static int wil_rf_sector_set_cfg(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int data_len)
{
struct wil6210_priv *wil = wdev_to_wil(wdev);
struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
int rc, tmp;
struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
struct nlattr *tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1];
u16 sector_index, rf_module_index;
u8 sector_type;
u32 rf_modules_vec = 0;
struct wmi_set_rf_sector_params_cmd cmd;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_set_rf_sector_params_done_event evt;
} __packed reply = {
.evt = {.status = WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR},
};
struct nlattr *nl_cfg;
struct wmi_rf_sector_info *si;
if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
return -EOPNOTSUPP;
rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data,
data_len, wil_rf_sector_policy, NULL);
if (rc) {
wil_err(wil, "Invalid rf sector ATTR\n");
return rc;
}
if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
!tb[QCA_ATTR_DMG_RF_SECTOR_TYPE] ||
!tb[QCA_ATTR_DMG_RF_SECTOR_CFG]) {
wil_err(wil, "Invalid rf sector spec\n");
return -EINVAL;
}
sector_index = nla_get_u16(
tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
if (sector_index >= WIL_MAX_RF_SECTORS) {
wil_err(wil, "Invalid sector index %d\n", sector_index);
return -EINVAL;
}
sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
wil_err(wil, "Invalid sector type %d\n", sector_type);
return -EINVAL;
}
memset(&cmd, 0, sizeof(cmd));
cmd.sector_idx = cpu_to_le16(sector_index);
cmd.sector_type = sector_type;
nla_for_each_nested(nl_cfg, tb[QCA_ATTR_DMG_RF_SECTOR_CFG],
tmp) {
rc = nla_parse_nested_deprecated(tb2,
QCA_ATTR_DMG_RF_SECTOR_CFG_MAX,
nl_cfg,
wil_rf_sector_cfg_policy,
NULL);
if (rc) {
wil_err(wil, "invalid sector cfg\n");
return -EINVAL;
}
if (!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX] ||
!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0] ||
!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1] ||
!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2] ||
!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI] ||
!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO] ||
!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16]) {
wil_err(wil, "missing cfg params\n");
return -EINVAL;
}
rf_module_index = nla_get_u8(
tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX]);
if (rf_module_index >= WMI_MAX_RF_MODULES_NUM) {
wil_err(wil, "invalid RF module index %d\n",
rf_module_index);
return -EINVAL;
}
rf_modules_vec |= BIT(rf_module_index);
si = &cmd.sectors_info[rf_module_index];
si->etype0 = cpu_to_le32(nla_get_u32(
tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0]));
si->etype1 = cpu_to_le32(nla_get_u32(
tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1]));
si->etype2 = cpu_to_le32(nla_get_u32(
tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2]));
si->psh_hi = cpu_to_le32(nla_get_u32(
tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI]));
si->psh_lo = cpu_to_le32(nla_get_u32(
tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO]));
si->dtype_swch_off = cpu_to_le32(nla_get_u32(
tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16]));
}
cmd.rf_modules_vec = rf_modules_vec & 0xFF;
rc = wmi_call(wil, WMI_SET_RF_SECTOR_PARAMS_CMDID, vif->mid,
&cmd, sizeof(cmd), WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID,
&reply, sizeof(reply),
500);
if (rc)
return rc;
return wil_rf_sector_status_to_rc(reply.evt.status);
}
static int wil_rf_sector_get_selected(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int data_len)
{
struct wil6210_priv *wil = wdev_to_wil(wdev);
struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
int rc;
struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
u8 sector_type, mac_addr[ETH_ALEN];
int cid = 0;
struct wmi_get_selected_rf_sector_index_cmd cmd;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_get_selected_rf_sector_index_done_event evt;
} __packed reply = {
.evt = {.status = WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR},
};
struct sk_buff *msg;
if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
return -EOPNOTSUPP;
rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data,
data_len, wil_rf_sector_policy, NULL);
if (rc) {
wil_err(wil, "Invalid rf sector ATTR\n");
return rc;
}
if (!tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]) {
wil_err(wil, "Invalid rf sector spec\n");
return -EINVAL;
}
sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
wil_err(wil, "Invalid sector type %d\n", sector_type);
return -EINVAL;
}
if (tb[QCA_ATTR_MAC_ADDR]) {
ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR]));
cid = wil_find_cid(wil, vif->mid, mac_addr);
if (cid < 0) {
wil_err(wil, "invalid MAC address %pM\n", mac_addr);
return -ENOENT;
}
} else {
if (test_bit(wil_vif_fwconnected, vif->status)) {
wil_err(wil, "must specify MAC address when connected\n");
return -EINVAL;
}
}
memset(&cmd, 0, sizeof(cmd));
cmd.cid = (u8)cid;
cmd.sector_type = sector_type;
rc = wmi_call(wil, WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID, vif->mid,
&cmd, sizeof(cmd),
WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID,
&reply, sizeof(reply),
500);
if (rc)
return rc;
if (reply.evt.status) {
wil_err(wil, "get rf selected sector cfg failed with status %d\n",
reply.evt.status);
return wil_rf_sector_status_to_rc(reply.evt.status);
}
msg = cfg80211_vendor_cmd_alloc_reply_skb(
wiphy, 64 * WMI_MAX_RF_MODULES_NUM);
if (!msg)
return -ENOMEM;
if (nla_put_u64_64bit(msg, QCA_ATTR_TSF,
le64_to_cpu(reply.evt.tsf),
QCA_ATTR_PAD) ||
nla_put_u16(msg, QCA_ATTR_DMG_RF_SECTOR_INDEX,
le16_to_cpu(reply.evt.sector_idx)))
goto nla_put_failure;
rc = cfg80211_vendor_cmd_reply(msg);
return rc;
nla_put_failure:
kfree_skb(msg);
return -ENOBUFS;
}
static int wil_rf_sector_wmi_set_selected(struct wil6210_priv *wil,
u8 mid, u16 sector_index,
u8 sector_type, u8 cid)
{
struct wmi_set_selected_rf_sector_index_cmd cmd;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_set_selected_rf_sector_index_done_event evt;
} __packed reply = {
.evt = {.status = WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR},
};
int rc;
memset(&cmd, 0, sizeof(cmd));
cmd.sector_idx = cpu_to_le16(sector_index);
cmd.sector_type = sector_type;
cmd.cid = (u8)cid;
rc = wmi_call(wil, WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID, mid,
&cmd, sizeof(cmd),
WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID,
&reply, sizeof(reply),
500);
if (rc)
return rc;
return wil_rf_sector_status_to_rc(reply.evt.status);
}
static int wil_rf_sector_set_selected(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int data_len)
{
struct wil6210_priv *wil = wdev_to_wil(wdev);
struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
int rc;
struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
u16 sector_index;
u8 sector_type, mac_addr[ETH_ALEN], i;
int cid = 0;
if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
return -EOPNOTSUPP;
rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data,
data_len, wil_rf_sector_policy, NULL);
if (rc) {
wil_err(wil, "Invalid rf sector ATTR\n");
return rc;
}
if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
!tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]) {
wil_err(wil, "Invalid rf sector spec\n");
return -EINVAL;
}
sector_index = nla_get_u16(
tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
if (sector_index >= WIL_MAX_RF_SECTORS &&
sector_index != WMI_INVALID_RF_SECTOR_INDEX) {
wil_err(wil, "Invalid sector index %d\n", sector_index);
return -EINVAL;
}
sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
wil_err(wil, "Invalid sector type %d\n", sector_type);
return -EINVAL;
}
if (tb[QCA_ATTR_MAC_ADDR]) {
ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR]));
if (!is_broadcast_ether_addr(mac_addr)) {
cid = wil_find_cid(wil, vif->mid, mac_addr);
if (cid < 0) {
wil_err(wil, "invalid MAC address %pM\n",
mac_addr);
return -ENOENT;
}
} else {
if (sector_index != WMI_INVALID_RF_SECTOR_INDEX) {
wil_err(wil, "broadcast MAC valid only with unlocking\n");
return -EINVAL;
}
cid = -1;
}
} else {
if (test_bit(wil_vif_fwconnected, vif->status)) {
wil_err(wil, "must specify MAC address when connected\n");
return -EINVAL;
}
/* otherwise, use cid=0 for an unassociated station */
}
if (cid >= 0) {
rc = wil_rf_sector_wmi_set_selected(wil, vif->mid, sector_index,
sector_type, cid);
} else {
/* unlock all cids */
rc = wil_rf_sector_wmi_set_selected(
wil, vif->mid, WMI_INVALID_RF_SECTOR_INDEX,
sector_type, WIL_CID_ALL);
if (rc == -EINVAL) {
for (i = 0; i < wil->max_assoc_sta; i++) {
if (wil->sta[i].mid != vif->mid)
continue;
rc = wil_rf_sector_wmi_set_selected(
wil, vif->mid,
WMI_INVALID_RF_SECTOR_INDEX,
sector_type, i);
/* the FW will silently ignore and return
* success for unused cid, so abort the loop
* on any other error
*/
if (rc) {
wil_err(wil, "unlock cid %d failed with status %d\n",
i, rc);
break;
}
}
}
}
return rc;
}
|
linux-master
|
drivers/net/wireless/ath/wil6210/cfg80211.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/suspend.h>
#include "wil6210.h"
#include <linux/rtnetlink.h>
#include <linux/pm_runtime.h>
static int n_msi = 3;
module_param(n_msi, int, 0444);
MODULE_PARM_DESC(n_msi, " Use MSI interrupts: 0 - use INTx, 1 - single MSI, 3 - three MSIs (default)");
bool ftm_mode;
module_param(ftm_mode, bool, 0444);
MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false");
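/* Usage sketch (not from the source): both parameters are 0444, i.e.
 * read-only at runtime, so they are set at module load time, e.g.
 *   modprobe wil6210 n_msi=1 ftm_mode=1
 * or on the kernel command line as wil6210.n_msi=1, assuming the module
 * is built and loaded as wil6210.
 */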
static int wil6210_pm_notify(struct notifier_block *notify_block,
unsigned long mode, void *unused);
static
int wil_set_capabilities(struct wil6210_priv *wil)
{
const char *wil_fw_name;
u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) &
RGF_USER_REVISION_ID_MASK);
int platform_capa;
struct fw_map *iccm_section, *sct;
bitmap_zero(wil->hw_capa, hw_capa_last);
bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
bitmap_zero(wil->platform_capa, WIL_PLATFORM_CAPA_MAX);
wil->wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_DEFAULT :
WIL_FW_NAME_DEFAULT;
wil->chip_revision = chip_revision;
switch (jtag_id) {
case JTAG_DEV_ID_SPARROW:
memcpy(fw_mapping, sparrow_fw_mapping,
sizeof(sparrow_fw_mapping));
switch (chip_revision) {
case REVISION_ID_SPARROW_D0:
wil->hw_name = "Sparrow D0";
wil->hw_version = HW_VER_SPARROW_D0;
wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_SPARROW_PLUS :
WIL_FW_NAME_SPARROW_PLUS;
if (wil_fw_verify_file_exists(wil, wil_fw_name))
wil->wil_fw_name = wil_fw_name;
sct = wil_find_fw_mapping("mac_rgf_ext");
if (!sct) {
wil_err(wil, "mac_rgf_ext section not found in fw_mapping\n");
return -EINVAL;
}
memcpy(sct, &sparrow_d0_mac_rgf_ext, sizeof(*sct));
break;
case REVISION_ID_SPARROW_B0:
wil->hw_name = "Sparrow B0";
wil->hw_version = HW_VER_SPARROW_B0;
break;
default:
wil->hw_name = "Unknown";
wil->hw_version = HW_VER_UNKNOWN;
break;
}
wil->rgf_fw_assert_code_addr = SPARROW_RGF_FW_ASSERT_CODE;
wil->rgf_ucode_assert_code_addr = SPARROW_RGF_UCODE_ASSERT_CODE;
break;
case JTAG_DEV_ID_TALYN:
wil->hw_name = "Talyn-MA";
wil->hw_version = HW_VER_TALYN;
memcpy(fw_mapping, talyn_fw_mapping, sizeof(talyn_fw_mapping));
wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
wil->rgf_ucode_assert_code_addr = TALYN_RGF_UCODE_ASSERT_CODE;
if (wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1) &
BIT_NO_FLASH_INDICATION)
set_bit(hw_capa_no_flash, wil->hw_capa);
wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN :
WIL_FW_NAME_TALYN;
if (wil_fw_verify_file_exists(wil, wil_fw_name))
wil->wil_fw_name = wil_fw_name;
break;
case JTAG_DEV_ID_TALYN_MB:
wil->hw_name = "Talyn-MB";
wil->hw_version = HW_VER_TALYN_MB;
memcpy(fw_mapping, talyn_mb_fw_mapping,
sizeof(talyn_mb_fw_mapping));
wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
wil->rgf_ucode_assert_code_addr = TALYN_RGF_UCODE_ASSERT_CODE;
set_bit(hw_capa_no_flash, wil->hw_capa);
wil->use_enhanced_dma_hw = true;
wil->use_rx_hw_reordering = true;
wil->use_compressed_rx_status = true;
wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN :
WIL_FW_NAME_TALYN;
if (wil_fw_verify_file_exists(wil, wil_fw_name))
wil->wil_fw_name = wil_fw_name;
break;
default:
wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n",
jtag_id, chip_revision);
wil->hw_name = "Unknown";
wil->hw_version = HW_VER_UNKNOWN;
return -EINVAL;
}
wil_init_txrx_ops(wil);
iccm_section = wil_find_fw_mapping("fw_code");
if (!iccm_section) {
wil_err(wil, "fw_code section not found in fw_mapping\n");
return -EINVAL;
}
wil->iccm_base = iccm_section->host;
wil_info(wil, "Board hardware is %s, flash %sexist\n", wil->hw_name,
test_bit(hw_capa_no_flash, wil->hw_capa) ? "doesn't " : "");
/* Get platform capabilities */
if (wil->platform_ops.get_capa) {
platform_capa =
wil->platform_ops.get_capa(wil->platform_handle);
memcpy(wil->platform_capa, &platform_capa,
min(sizeof(wil->platform_capa), sizeof(platform_capa)));
}
wil_info(wil, "platform_capa 0x%lx\n", *wil->platform_capa);
/* extract FW capabilities from file without loading the FW */
wil_request_firmware(wil, wil->wil_fw_name, false);
wil_refresh_fw_capabilities(wil);
return 0;
}
void wil_disable_irq(struct wil6210_priv *wil)
{
int irq = wil->pdev->irq;
disable_irq(irq);
if (wil->n_msi == 3) {
disable_irq(irq + 1);
disable_irq(irq + 2);
}
}
void wil_enable_irq(struct wil6210_priv *wil)
{
int irq = wil->pdev->irq;
enable_irq(irq);
if (wil->n_msi == 3) {
enable_irq(irq + 1);
enable_irq(irq + 2);
}
}
static void wil_remove_all_additional_vifs(struct wil6210_priv *wil)
{
struct wil6210_vif *vif;
int i;
for (i = 1; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (vif) {
wil_vif_prepare_stop(vif);
wil_vif_remove(wil, vif->mid);
}
}
}
/* Bus ops */
static int wil_if_pcie_enable(struct wil6210_priv *wil)
{
struct pci_dev *pdev = wil->pdev;
int rc;
/* on platforms with buggy ACPI, pdev->msi_enabled may be set to
* allow pci_enable_device to work. This indicates INTx was not routed
* and only MSI should be used
*/
int msi_only = pdev->msi_enabled;
wil_dbg_misc(wil, "if_pcie_enable\n");
pci_set_master(pdev);
/* how many MSI interrupts to request? */
switch (n_msi) {
case 3:
case 1:
wil_dbg_misc(wil, "Setup %d MSI interrupts\n", n_msi);
break;
case 0:
wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
break;
default:
wil_err(wil, "Invalid n_msi=%d, default to 1\n", n_msi);
n_msi = 1;
}
if (n_msi == 3 &&
pci_alloc_irq_vectors(pdev, n_msi, n_msi, PCI_IRQ_MSI) < n_msi) {
wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
n_msi = 1;
}
if (n_msi == 1 && pci_enable_msi(pdev)) {
wil_err(wil, "pci_enable_msi failed, use INTx\n");
n_msi = 0;
}
wil->n_msi = n_msi;
if (wil->n_msi == 0 && msi_only) {
wil_err(wil, "Interrupt pin not routed, unable to use INTx\n");
rc = -ENODEV;
goto stop_master;
}
rc = wil6210_init_irq(wil, pdev->irq);
if (rc)
goto release_vectors;
/* need reset here to obtain MAC */
mutex_lock(&wil->mutex);
rc = wil_reset(wil, false);
mutex_unlock(&wil->mutex);
if (rc)
goto release_irq;
return 0;
release_irq:
wil6210_fini_irq(wil, pdev->irq);
release_vectors:
/* safe to call if no allocation */
pci_free_irq_vectors(pdev);
stop_master:
pci_clear_master(pdev);
return rc;
}
static int wil_if_pcie_disable(struct wil6210_priv *wil)
{
struct pci_dev *pdev = wil->pdev;
wil_dbg_misc(wil, "if_pcie_disable\n");
pci_clear_master(pdev);
/* disable and release IRQ */
wil6210_fini_irq(wil, pdev->irq);
/* safe to call if no MSI */
pci_disable_msi(pdev);
/* TODO: disable HW */
return 0;
}
static int wil_platform_rop_ramdump(void *wil_handle, void *buf, uint32_t size)
{
struct wil6210_priv *wil = wil_handle;
if (!wil)
return -EINVAL;
return wil_fw_copy_crash_dump(wil, buf, size);
}
static int wil_platform_rop_fw_recovery(void *wil_handle)
{
struct wil6210_priv *wil = wil_handle;
if (!wil)
return -EINVAL;
wil_fw_error_recovery(wil);
return 0;
}
static void wil_platform_ops_uninit(struct wil6210_priv *wil)
{
if (wil->platform_ops.uninit)
wil->platform_ops.uninit(wil->platform_handle);
memset(&wil->platform_ops, 0, sizeof(wil->platform_ops));
}
static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct wil6210_priv *wil;
struct device *dev = &pdev->dev;
int rc;
const struct wil_platform_rops rops = {
.ramdump = wil_platform_rop_ramdump,
.fw_recovery = wil_platform_rop_fw_recovery,
};
u32 bar_size = pci_resource_len(pdev, 0);
int dma_addr_size[] = {64, 48, 40, 32}; /* keep descending order */
int i, start_idx;
/* check HW */
dev_info(&pdev->dev, WIL_NAME
" device found [%04x:%04x] (rev %x) bar size 0x%x\n",
(int)pdev->vendor, (int)pdev->device, (int)pdev->revision,
bar_size);
if ((bar_size < WIL6210_MIN_MEM_SIZE) ||
(bar_size > WIL6210_MAX_MEM_SIZE)) {
dev_err(&pdev->dev, "Unexpected BAR0 size 0x%x\n",
bar_size);
return -ENODEV;
}
wil = wil_if_alloc(dev);
if (IS_ERR(wil)) {
rc = (int)PTR_ERR(wil);
dev_err(dev, "wil_if_alloc failed: %d\n", rc);
return rc;
}
wil->pdev = pdev;
pci_set_drvdata(pdev, wil);
wil->bar_size = bar_size;
/* rollback to if_free */
wil->platform_handle =
wil_platform_init(&pdev->dev, &wil->platform_ops, &rops, wil);
if (!wil->platform_handle) {
rc = -ENODEV;
wil_err(wil, "wil_platform_init failed\n");
goto if_free;
}
/* rollback to err_plat */
rc = pci_enable_device(pdev);
if (rc && pdev->msi_enabled == 0) {
wil_err(wil,
"pci_enable_device failed, retry with MSI only\n");
/* Workaround for platforms that can't allocate IRQ:
* retry with MSI only
*/
pdev->msi_enabled = 1;
rc = pci_enable_device(pdev);
}
if (rc) {
wil_err(wil,
"pci_enable_device failed, even with MSI only\n");
goto err_plat;
}
/* rollback to err_disable_pdev */
pci_set_power_state(pdev, PCI_D0);
rc = pci_request_region(pdev, 0, WIL_NAME);
if (rc) {
wil_err(wil, "pci_request_region failed\n");
goto err_disable_pdev;
}
/* rollback to err_release_reg */
wil->csr = pci_ioremap_bar(pdev, 0);
if (!wil->csr) {
wil_err(wil, "pci_ioremap_bar failed\n");
rc = -ENODEV;
goto err_release_reg;
}
/* rollback to err_iounmap */
wil_info(wil, "CSR at %pR -> 0x%p\n", &pdev->resource[0], wil->csr);
rc = wil_set_capabilities(wil);
if (rc) {
wil_err(wil, "wil_set_capabilities failed, rc %d\n", rc);
goto err_iounmap;
}
/* The device supports >32-bit DMA addresses;
* for legacy DMA, start the scan from 48 bits.
*/
start_idx = wil->use_enhanced_dma_hw ? 0 : 1;
for (i = start_idx; i < ARRAY_SIZE(dma_addr_size); i++) {
rc = dma_set_mask_and_coherent(dev,
DMA_BIT_MASK(dma_addr_size[i]));
if (rc) {
dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
dma_addr_size[i], rc);
continue;
}
dev_info(dev, "using dma mask %d", dma_addr_size[i]);
wil->dma_addr_size = dma_addr_size[i];
break;
}
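/* Illustrative outcome of the descending scan above: on a hypothetical
 * platform whose IOMMU supports only 32-bit DMA, the 48- and 40-bit
 * dma_set_mask_and_coherent() calls fail and the loop settles on
 * dma_addr_size = 32; enhanced-DMA hardware starts the scan at 64 bits
 * and may settle there.
 */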
if (wil->dma_addr_size == 0)
goto err_iounmap;
wil6210_clear_irq(wil);
/* FW should raise IRQ when ready */
rc = wil_if_pcie_enable(wil);
if (rc) {
wil_err(wil, "Enable device failed\n");
goto err_iounmap;
}
/* rollback to bus_disable */
wil_clear_fw_log_addr(wil);
rc = wil_if_add(wil);
if (rc) {
wil_err(wil, "wil_if_add failed: %d\n", rc);
goto bus_disable;
}
/* in case of WMI-only FW, perform full reset and FW loading */
if (test_bit(WMI_FW_CAPABILITY_WMI_ONLY, wil->fw_capabilities)) {
wil_dbg_misc(wil, "Loading WMI only FW\n");
mutex_lock(&wil->mutex);
rc = wil_reset(wil, true);
mutex_unlock(&wil->mutex);
if (rc) {
wil_err(wil, "failed to load WMI only FW\n");
/* ignore the error to allow debugging */
}
}
if (IS_ENABLED(CONFIG_PM))
wil->pm_notify.notifier_call = wil6210_pm_notify;
rc = register_pm_notifier(&wil->pm_notify);
if (rc)
/* Do not fail the driver initialization, as suspend can
* be prevented in a later phase if needed
*/
wil_err(wil, "register_pm_notifier failed: %d\n", rc);
wil6210_debugfs_init(wil);
wil_pm_runtime_allow(wil);
return 0;
bus_disable:
wil_if_pcie_disable(wil);
err_iounmap:
pci_iounmap(pdev, wil->csr);
err_release_reg:
pci_release_region(pdev, 0);
err_disable_pdev:
pci_disable_device(pdev);
err_plat:
wil_platform_ops_uninit(wil);
if_free:
wil_if_free(wil);
return rc;
}
static void wil_pcie_remove(struct pci_dev *pdev)
{
struct wil6210_priv *wil = pci_get_drvdata(pdev);
void __iomem *csr = wil->csr;
wil_dbg_misc(wil, "pcie_remove\n");
unregister_pm_notifier(&wil->pm_notify);
wil_pm_runtime_forbid(wil);
wil6210_debugfs_remove(wil);
rtnl_lock();
wiphy_lock(wil->wiphy);
wil_p2p_wdev_free(wil);
wil_remove_all_additional_vifs(wil);
wiphy_unlock(wil->wiphy);
rtnl_unlock();
wil_if_remove(wil);
wil_if_pcie_disable(wil);
pci_iounmap(pdev, csr);
pci_release_region(pdev, 0);
pci_disable_device(pdev);
wil_platform_ops_uninit(wil);
wil_if_free(wil);
}
static const struct pci_device_id wil6210_pcie_ids[] = {
{ PCI_DEVICE(0x1ae9, 0x0310) },
{ PCI_DEVICE(0x1ae9, 0x0302) }, /* same as above, firmware broken */
{ PCI_DEVICE(0x17cb, 0x1201) }, /* Talyn */
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
static int wil6210_suspend(struct device *dev, bool is_runtime)
{
int rc = 0;
struct pci_dev *pdev = to_pci_dev(dev);
struct wil6210_priv *wil = pci_get_drvdata(pdev);
bool keep_radio_on, active_ifaces;
wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
mutex_lock(&wil->vif_mutex);
active_ifaces = wil_has_active_ifaces(wil, true, false);
mutex_unlock(&wil->vif_mutex);
keep_radio_on = active_ifaces && wil->keep_radio_on_during_sleep;
rc = wil_can_suspend(wil, is_runtime);
if (rc)
goto out;
rc = wil_suspend(wil, is_runtime, keep_radio_on);
if (!rc) {
/* In case radio stays on, platform device will control
* PCIe master
*/
if (!keep_radio_on) {
/* disable bus mastering */
pci_clear_master(pdev);
wil->suspend_stats.r_off.successful_suspends++;
} else {
wil->suspend_stats.r_on.successful_suspends++;
}
}
out:
return rc;
}
static int wil6210_resume(struct device *dev, bool is_runtime)
{
int rc = 0;
struct pci_dev *pdev = to_pci_dev(dev);
struct wil6210_priv *wil = pci_get_drvdata(pdev);
bool keep_radio_on, active_ifaces;
wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
mutex_lock(&wil->vif_mutex);
active_ifaces = wil_has_active_ifaces(wil, true, false);
mutex_unlock(&wil->vif_mutex);
keep_radio_on = active_ifaces && wil->keep_radio_on_during_sleep;
/* In case radio stays on, platform device will control
* PCIe master
*/
if (!keep_radio_on)
/* allow master */
pci_set_master(pdev);
rc = wil_resume(wil, is_runtime, keep_radio_on);
if (rc) {
wil_err(wil, "device failed to resume (%d)\n", rc);
if (!keep_radio_on) {
pci_clear_master(pdev);
wil->suspend_stats.r_off.failed_resumes++;
} else {
wil->suspend_stats.r_on.failed_resumes++;
}
} else {
if (keep_radio_on)
wil->suspend_stats.r_on.successful_resumes++;
else
wil->suspend_stats.r_off.successful_resumes++;
}
return rc;
}
static int wil6210_pm_notify(struct notifier_block *notify_block,
unsigned long mode, void *unused)
{
struct wil6210_priv *wil = container_of(
notify_block, struct wil6210_priv, pm_notify);
int rc = 0;
enum wil_platform_event evt;
wil_dbg_pm(wil, "pm_notify: mode (%ld)\n", mode);
switch (mode) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
case PM_RESTORE_PREPARE:
rc = wil_can_suspend(wil, false);
if (rc)
break;
evt = WIL_PLATFORM_EVT_PRE_SUSPEND;
if (wil->platform_ops.notify)
rc = wil->platform_ops.notify(wil->platform_handle,
evt);
break;
case PM_POST_SUSPEND:
case PM_POST_HIBERNATION:
case PM_POST_RESTORE:
evt = WIL_PLATFORM_EVT_POST_SUSPEND;
if (wil->platform_ops.notify)
rc = wil->platform_ops.notify(wil->platform_handle,
evt);
break;
default:
wil_dbg_pm(wil, "unhandled notify mode %ld\n", mode);
break;
}
wil_dbg_pm(wil, "notification mode %ld: rc (%d)\n", mode, rc);
return rc;
}
static int __maybe_unused wil6210_pm_suspend(struct device *dev)
{
return wil6210_suspend(dev, false);
}
static int __maybe_unused wil6210_pm_resume(struct device *dev)
{
return wil6210_resume(dev, false);
}
static int __maybe_unused wil6210_pm_runtime_idle(struct device *dev)
{
struct wil6210_priv *wil = dev_get_drvdata(dev);
wil_dbg_pm(wil, "Runtime idle\n");
return wil_can_suspend(wil, true);
}
static int __maybe_unused wil6210_pm_runtime_resume(struct device *dev)
{
return wil6210_resume(dev, true);
}
static int __maybe_unused wil6210_pm_runtime_suspend(struct device *dev)
{
struct wil6210_priv *wil = dev_get_drvdata(dev);
if (test_bit(wil_status_suspended, wil->status)) {
wil_dbg_pm(wil, "trying to suspend while suspended\n");
return 1;
}
return wil6210_suspend(dev, true);
}
static const struct dev_pm_ops wil6210_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume)
SET_RUNTIME_PM_OPS(wil6210_pm_runtime_suspend,
wil6210_pm_runtime_resume,
wil6210_pm_runtime_idle)
};
static struct pci_driver wil6210_driver = {
.probe = wil_pcie_probe,
.remove = wil_pcie_remove,
.id_table = wil6210_pcie_ids,
.name = WIL_NAME,
.driver = {
.pm = &wil6210_pm_ops,
},
};
static int __init wil6210_driver_init(void)
{
int rc;
rc = wil_platform_modinit();
if (rc)
return rc;
rc = pci_register_driver(&wil6210_driver);
if (rc)
wil_platform_modexit();
return rc;
}
module_init(wil6210_driver_init);
static void __exit wil6210_driver_exit(void)
{
pci_unregister_driver(&wil6210_driver);
wil_platform_modexit();
}
module_exit(wil6210_driver_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Qualcomm Atheros <[email protected]>");
MODULE_DESCRIPTION("Driver for 60g WiFi WIL6210 card");
|
linux-master
|
drivers/net/wireless/ath/wil6210/pcie_bus.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/moduleparam.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include "wil6210.h"
#include "txrx.h"
#include "wmi.h"
#include "trace.h"
/* set the default max assoc sta to max supported by driver */
uint max_assoc_sta = WIL6210_MAX_CID;
module_param(max_assoc_sta, uint, 0444);
MODULE_PARM_DESC(max_assoc_sta, " Max number of stations associated with the AP");
int agg_wsize; /* = 0; */
module_param(agg_wsize, int, 0644);
MODULE_PARM_DESC(agg_wsize, " Window size for Tx Block Ack after connect;"
" 0 - use default; < 0 - don't auto-establish");
u8 led_id = WIL_LED_INVALID_ID;
module_param(led_id, byte, 0444);
MODULE_PARM_DESC(led_id,
" 60G device led enablement. Set the led ID (0-2) to enable");
#define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200
#define WIL_WMI_PCP_STOP_TO_MS 5000
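/* Usage sketch (not from the source): max_assoc_sta and led_id are 0444
 * and can only be set at load time, e.g.
 *   modprobe wil6210 max_assoc_sta=4 led_id=0
 * while agg_wsize is 0644 and, assuming the usual module sysfs layout,
 * may also be changed at runtime:
 *   echo 16 > /sys/module/wil6210/parameters/agg_wsize
 */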
/**
 * DOC: WMI event receiving - theory of operations
 *
 * When the firmware is about to report a WMI event, it fills a memory area
 * in the mailbox and raises the misc. IRQ. The threaded interrupt handler is
 * invoked for the misc IRQ and calls @wmi_recv_cmd.
 *
 * @wmi_recv_cmd reads the event, allocates a memory chunk and attaches it to
 * the event list @wil->pending_wmi_ev. Then the work queue @wil->wmi_wq wakes
 * up and handles the events within @wmi_event_worker. Every event gets
 * detached from the list, processed and deleted.
 *
 * The purpose of this mechanism is to release the IRQ thread; otherwise,
 * if WMI event handling involved another WMI command flow, that second flow
 * would never complete because the IRQ thread is blocked.
 */
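/* A minimal sketch of the two-stage dispatch described above, using
 * hypothetical names (example_*) and assuming fields shaped like those
 * named in the DOC text; kept under #if 0 as illustration only.
 */
#if 0
struct example_evt {
	struct list_head list;
	u16 id;
	u16 len;
	u8 data[];
};

/* Stage 1 - threaded IRQ handler: copy the event out of the mailbox,
 * queue it, kick the worker and return quickly, so the IRQ thread is
 * never blocked on event processing.
 */
static void example_queue_event(struct wil6210_priv *wil, u16 id,
				const void *payload, u16 len)
{
	struct example_evt *evt = kmalloc(sizeof(*evt) + len, GFP_KERNEL);

	if (!evt)
		return;
	evt->id = id;
	evt->len = len;
	memcpy(evt->data, payload, len);
	spin_lock(&wil->wmi_ev_lock);
	list_add_tail(&evt->list, &wil->pending_wmi_ev);
	spin_unlock(&wil->wmi_ev_lock);
	/* Stage 2 runs later in wil->wmi_wq and drains the list; there it
	 * may issue further WMI commands safely.
	 */
	queue_work(wil->wmi_wq, &wil->wmi_event_worker);
}
#endif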
/**
 * DOC: Addressing - theory of operations
 *
 * There are several buses present on the WIL6210 card.
 * The same memory areas are visible at different addresses on
 * the different buses. There are 3 main bus masters:
 * - MAC CPU (ucode)
 * - User CPU (firmware)
 * - AHB (host)
 *
 * On the PCI bus, there is one BAR (BAR0) of 2Mb size, exposing
 * AHB addresses starting from 0x880000.
 *
 * Internally, the firmware uses addresses that allow faster access but
 * are invisible from the host. To read from these addresses, an alternative
 * AHB address must be used.
 */
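/* Worked example of the remapping described above, using the Sparrow table
 * below and assuming HOSTADDR() subtracts the 0x880000 AHB base (as the
 * bounds checks in wmi_addr() further down suggest): firmware linker
 * address 0x000010 falls in "fw_code" [0x000000..0x040000), whose host
 * base is 0x8c0000, so the AHB address is
 * 0x8c0000 + (0x000010 - 0x000000) = 0x8c0010, and the BAR0 offset is
 * 0x8c0010 - 0x880000 = 0x040010.
 */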
/* sparrow_fw_mapping provides memory remapping table for sparrow
*
* array size should be kept in sync with the declaration in wil6210.h
*
* Sparrow memory mapping:
* Linker address PCI/Host address
* 0x880000 .. 0xa80000 2Mb BAR0
* 0x800000 .. 0x808000 0x900000 .. 0x908000 32k DCCM
* 0x840000 .. 0x860000 0x908000 .. 0x928000 128k PERIPH
*/
const struct fw_map sparrow_fw_mapping[] = {
/* FW code RAM 256k */
{0x000000, 0x040000, 0x8c0000, "fw_code", true, true},
/* FW data RAM 32k */
{0x800000, 0x808000, 0x900000, "fw_data", true, true},
/* periph data 128k */
{0x840000, 0x860000, 0x908000, "fw_peri", true, true},
/* various RGF 40k */
{0x880000, 0x88a000, 0x880000, "rgf", true, true},
/* AGC table 4k */
{0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
/* Pcie_ext_rgf 4k */
{0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
/* mac_ext_rgf 512b */
{0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext", true, true},
/* upper area 548k */
{0x8c0000, 0x949000, 0x8c0000, "upper", true, true},
/* UCODE areas - accessible by debugfs blobs but not by
* wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
*/
/* ucode code RAM 128k */
{0x000000, 0x020000, 0x920000, "uc_code", false, false},
/* ucode data RAM 16k */
{0x800000, 0x804000, 0x940000, "uc_data", false, false},
};
/* sparrow_d0_mac_rgf_ext - mac_rgf_ext section for Sparrow D0
* it is a bit larger to support extra features
*/
const struct fw_map sparrow_d0_mac_rgf_ext = {
0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true, true
};
/* talyn_fw_mapping provides memory remapping table for Talyn
*
* array size should be kept in sync with the declaration in wil6210.h
*
* Talyn memory mapping:
* Linker address PCI/Host address
* 0x880000 .. 0xc80000 4Mb BAR0
* 0x800000 .. 0x820000 0xa00000 .. 0xa20000 128k DCCM
* 0x840000 .. 0x858000 0xa20000 .. 0xa38000 96k PERIPH
*/
const struct fw_map talyn_fw_mapping[] = {
/* FW code RAM 1M */
{0x000000, 0x100000, 0x900000, "fw_code", true, true},
/* FW data RAM 128k */
{0x800000, 0x820000, 0xa00000, "fw_data", true, true},
/* periph. data RAM 96k */
{0x840000, 0x858000, 0xa20000, "fw_peri", true, true},
/* various RGF 40k */
{0x880000, 0x88a000, 0x880000, "rgf", true, true},
/* AGC table 4k */
{0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
/* Pcie_ext_rgf 4k */
{0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
/* mac_ext_rgf 1344b */
{0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true, true},
/* ext USER RGF 4k */
{0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true, true},
/* OTP 4k */
{0x8a0000, 0x8a1000, 0x8a0000, "otp", true, false},
/* DMA EXT RGF 64k */
{0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true, true},
/* upper area 1536k */
{0x900000, 0xa80000, 0x900000, "upper", true, true},
/* UCODE areas - accessible by debugfs blobs but not by
* wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
*/
/* ucode code RAM 256k */
{0x000000, 0x040000, 0xa38000, "uc_code", false, false},
/* ucode data RAM 32k */
{0x800000, 0x808000, 0xa78000, "uc_data", false, false},
};
/* talyn_mb_fw_mapping provides memory remapping table for Talyn-MB
*
* array size should be kept in sync with the declaration in wil6210.h
*
* Talyn MB memory mapping:
* Linker address PCI/Host address
* 0x880000 .. 0xc80000 4Mb BAR0
* 0x800000 .. 0x820000 0xa00000 .. 0xa20000 128k DCCM
* 0x840000 .. 0x858000 0xa20000 .. 0xa38000 96k PERIPH
*/
const struct fw_map talyn_mb_fw_mapping[] = {
/* FW code RAM 768k */
{0x000000, 0x0c0000, 0x900000, "fw_code", true, true},
/* FW data RAM 128k */
{0x800000, 0x820000, 0xa00000, "fw_data", true, true},
/* periph. data RAM 96k */
{0x840000, 0x858000, 0xa20000, "fw_peri", true, true},
/* various RGF 40k */
{0x880000, 0x88a000, 0x880000, "rgf", true, true},
/* AGC table 4k */
{0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
/* Pcie_ext_rgf 4k */
{0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
/* mac_ext_rgf 2256b */
{0x88c000, 0x88c8d0, 0x88c000, "mac_rgf_ext", true, true},
/* ext USER RGF 4k */
{0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true, true},
/* SEC PKA 16k */
{0x890000, 0x894000, 0x890000, "sec_pka", true, true},
/* SEC KDF RGF 3096b */
{0x898000, 0x898c18, 0x898000, "sec_kdf_rgf", true, true},
/* SEC MAIN 2124b */
{0x89a000, 0x89a84c, 0x89a000, "sec_main", true, true},
/* OTP 4k */
{0x8a0000, 0x8a1000, 0x8a0000, "otp", true, false},
/* DMA EXT RGF 64k */
{0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true, true},
/* DUM USER RGF 528b */
{0x8c0000, 0x8c0210, 0x8c0000, "dum_user_rgf", true, true},
/* DMA OFU 296b */
{0x8c2000, 0x8c2128, 0x8c2000, "dma_ofu", true, true},
/* ucode debug 256b */
{0x8c3000, 0x8c3100, 0x8c3000, "ucode_debug", true, true},
/* upper area 1536k */
{0x900000, 0xa80000, 0x900000, "upper", true, true},
/* UCODE areas - accessible by debugfs blobs but not by
* wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
*/
/* ucode code RAM 256k */
{0x000000, 0x040000, 0xa38000, "uc_code", false, false},
/* ucode data RAM 32k */
{0x800000, 0x808000, 0xa78000, "uc_data", false, false},
};
struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
struct blink_on_off_time led_blink_time[] = {
{WIL_LED_BLINK_ON_SLOW_MS, WIL_LED_BLINK_OFF_SLOW_MS},
{WIL_LED_BLINK_ON_MED_MS, WIL_LED_BLINK_OFF_MED_MS},
{WIL_LED_BLINK_ON_FAST_MS, WIL_LED_BLINK_OFF_FAST_MS},
};
struct auth_no_hdr {
__le16 auth_alg;
__le16 auth_transaction;
__le16 status_code;
/* possibly followed by Challenge text */
u8 variable[];
} __packed;
u8 led_polarity = LED_POLARITY_LOW_ACTIVE;
/**
 * wmi_addr_remap - return AHB address for given firmware internal (linker) address
 * @x: internal address
 * If the address has no valid AHB mapping, return 0
 */
static u32 wmi_addr_remap(u32 x)
{
uint i;
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
if (fw_mapping[i].fw &&
((x >= fw_mapping[i].from) && (x < fw_mapping[i].to)))
return x + fw_mapping[i].host - fw_mapping[i].from;
}
return 0;
}
/**
* wil_find_fw_mapping - find fw_mapping entry by section name
* @section: section name
*
* Return pointer to section or NULL if not found
*/
struct fw_map *wil_find_fw_mapping(const char *section)
{
int i;
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++)
if (fw_mapping[i].name &&
!strcmp(section, fw_mapping[i].name))
return &fw_mapping[i];
return NULL;
}
/**
 * wmi_buffer_block - Check address validity for WMI buffer; remap if needed
 * @wil: driver data
 * @ptr_: internal (linker) fw/ucode address
 * @size: if non-zero, validate that the block does not
 * exceed the device memory (BAR)
 *
 * A valid buffer must be DWORD aligned.
 *
 * Return the address for accessing the buffer from the host;
 * if the buffer is not valid, return NULL.
 */
void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr_, u32 size)
{
u32 off;
u32 ptr = le32_to_cpu(ptr_);
if (ptr % 4)
return NULL;
ptr = wmi_addr_remap(ptr);
if (ptr < WIL6210_FW_HOST_OFF)
return NULL;
off = HOSTADDR(ptr);
if (off > wil->bar_size - 4)
return NULL;
if (size && ((off + size > wil->bar_size) || (off + size < off)))
return NULL;
return wil->csr + off;
}
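/* Illustrative numbers for the checks above, assuming a hypothetical 2Mb
 * BAR (wil->bar_size = 0x200000) and HOSTADDR() subtracting the 0x880000
 * AHB base: a ptr_ that remaps to AHB 0x8c0010 gives
 * off = 0x8c0010 - 0x880000 = 0x40010, which is in range; an odd ptr_
 * such as 0x8c0011 fails the "ptr % 4" alignment test before any remap;
 * and a size large enough to wrap the u32 "off + size" is rejected by the
 * "(off + size < off)" overflow check instead of appearing in range.
 */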
void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
{
return wmi_buffer_block(wil, ptr_, 0);
}
/* Check address validity */
void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr)
{
u32 off;
if (ptr % 4)
return NULL;
if (ptr < WIL6210_FW_HOST_OFF)
return NULL;
off = HOSTADDR(ptr);
if (off > wil->bar_size - 4)
return NULL;
return wil->csr + off;
}
int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
struct wil6210_mbox_hdr *hdr)
{
void __iomem *src = wmi_buffer(wil, ptr);
if (!src)
return -EINVAL;
wil_memcpy_fromio_32(hdr, src, sizeof(*hdr));
return 0;
}
static const char *cmdid2name(u16 cmdid)
{
switch (cmdid) {
case WMI_NOTIFY_REQ_CMDID:
return "WMI_NOTIFY_REQ_CMD";
case WMI_START_SCAN_CMDID:
return "WMI_START_SCAN_CMD";
case WMI_CONNECT_CMDID:
return "WMI_CONNECT_CMD";
case WMI_DISCONNECT_CMDID:
return "WMI_DISCONNECT_CMD";
case WMI_SW_TX_REQ_CMDID:
return "WMI_SW_TX_REQ_CMD";
case WMI_GET_RF_SECTOR_PARAMS_CMDID:
return "WMI_GET_RF_SECTOR_PARAMS_CMD";
case WMI_SET_RF_SECTOR_PARAMS_CMDID:
return "WMI_SET_RF_SECTOR_PARAMS_CMD";
case WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID:
return "WMI_GET_SELECTED_RF_SECTOR_INDEX_CMD";
case WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID:
return "WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD";
case WMI_BRP_SET_ANT_LIMIT_CMDID:
return "WMI_BRP_SET_ANT_LIMIT_CMD";
case WMI_TOF_SESSION_START_CMDID:
return "WMI_TOF_SESSION_START_CMD";
case WMI_AOA_MEAS_CMDID:
return "WMI_AOA_MEAS_CMD";
case WMI_PMC_CMDID:
return "WMI_PMC_CMD";
case WMI_TOF_GET_TX_RX_OFFSET_CMDID:
return "WMI_TOF_GET_TX_RX_OFFSET_CMD";
case WMI_TOF_SET_TX_RX_OFFSET_CMDID:
return "WMI_TOF_SET_TX_RX_OFFSET_CMD";
case WMI_VRING_CFG_CMDID:
return "WMI_VRING_CFG_CMD";
case WMI_BCAST_VRING_CFG_CMDID:
return "WMI_BCAST_VRING_CFG_CMD";
case WMI_TRAFFIC_SUSPEND_CMDID:
return "WMI_TRAFFIC_SUSPEND_CMD";
case WMI_TRAFFIC_RESUME_CMDID:
return "WMI_TRAFFIC_RESUME_CMD";
case WMI_ECHO_CMDID:
return "WMI_ECHO_CMD";
case WMI_SET_MAC_ADDRESS_CMDID:
return "WMI_SET_MAC_ADDRESS_CMD";
case WMI_LED_CFG_CMDID:
return "WMI_LED_CFG_CMD";
case WMI_PCP_START_CMDID:
return "WMI_PCP_START_CMD";
case WMI_PCP_STOP_CMDID:
return "WMI_PCP_STOP_CMD";
case WMI_SET_SSID_CMDID:
return "WMI_SET_SSID_CMD";
case WMI_GET_SSID_CMDID:
return "WMI_GET_SSID_CMD";
case WMI_SET_PCP_CHANNEL_CMDID:
return "WMI_SET_PCP_CHANNEL_CMD";
case WMI_GET_PCP_CHANNEL_CMDID:
return "WMI_GET_PCP_CHANNEL_CMD";
case WMI_P2P_CFG_CMDID:
return "WMI_P2P_CFG_CMD";
case WMI_PORT_ALLOCATE_CMDID:
return "WMI_PORT_ALLOCATE_CMD";
case WMI_PORT_DELETE_CMDID:
return "WMI_PORT_DELETE_CMD";
case WMI_START_LISTEN_CMDID:
return "WMI_START_LISTEN_CMD";
case WMI_START_SEARCH_CMDID:
return "WMI_START_SEARCH_CMD";
case WMI_DISCOVERY_STOP_CMDID:
return "WMI_DISCOVERY_STOP_CMD";
case WMI_DELETE_CIPHER_KEY_CMDID:
return "WMI_DELETE_CIPHER_KEY_CMD";
case WMI_ADD_CIPHER_KEY_CMDID:
return "WMI_ADD_CIPHER_KEY_CMD";
case WMI_SET_APPIE_CMDID:
return "WMI_SET_APPIE_CMD";
case WMI_CFG_RX_CHAIN_CMDID:
return "WMI_CFG_RX_CHAIN_CMD";
case WMI_TEMP_SENSE_CMDID:
return "WMI_TEMP_SENSE_CMD";
case WMI_DEL_STA_CMDID:
return "WMI_DEL_STA_CMD";
case WMI_DISCONNECT_STA_CMDID:
return "WMI_DISCONNECT_STA_CMD";
case WMI_RING_BA_EN_CMDID:
return "WMI_RING_BA_EN_CMD";
case WMI_RING_BA_DIS_CMDID:
return "WMI_RING_BA_DIS_CMD";
case WMI_RCP_DELBA_CMDID:
return "WMI_RCP_DELBA_CMD";
case WMI_RCP_ADDBA_RESP_CMDID:
return "WMI_RCP_ADDBA_RESP_CMD";
case WMI_RCP_ADDBA_RESP_EDMA_CMDID:
return "WMI_RCP_ADDBA_RESP_EDMA_CMD";
case WMI_PS_DEV_PROFILE_CFG_CMDID:
return "WMI_PS_DEV_PROFILE_CFG_CMD";
case WMI_SET_MGMT_RETRY_LIMIT_CMDID:
return "WMI_SET_MGMT_RETRY_LIMIT_CMD";
case WMI_GET_MGMT_RETRY_LIMIT_CMDID:
return "WMI_GET_MGMT_RETRY_LIMIT_CMD";
case WMI_ABORT_SCAN_CMDID:
return "WMI_ABORT_SCAN_CMD";
case WMI_NEW_STA_CMDID:
return "WMI_NEW_STA_CMD";
case WMI_SET_THERMAL_THROTTLING_CFG_CMDID:
return "WMI_SET_THERMAL_THROTTLING_CFG_CMD";
case WMI_GET_THERMAL_THROTTLING_CFG_CMDID:
return "WMI_GET_THERMAL_THROTTLING_CFG_CMD";
case WMI_LINK_MAINTAIN_CFG_WRITE_CMDID:
return "WMI_LINK_MAINTAIN_CFG_WRITE_CMD";
case WMI_LO_POWER_CALIB_FROM_OTP_CMDID:
return "WMI_LO_POWER_CALIB_FROM_OTP_CMD";
case WMI_START_SCHED_SCAN_CMDID:
return "WMI_START_SCHED_SCAN_CMD";
case WMI_STOP_SCHED_SCAN_CMDID:
return "WMI_STOP_SCHED_SCAN_CMD";
case WMI_TX_STATUS_RING_ADD_CMDID:
return "WMI_TX_STATUS_RING_ADD_CMD";
case WMI_RX_STATUS_RING_ADD_CMDID:
return "WMI_RX_STATUS_RING_ADD_CMD";
case WMI_TX_DESC_RING_ADD_CMDID:
return "WMI_TX_DESC_RING_ADD_CMD";
case WMI_RX_DESC_RING_ADD_CMDID:
return "WMI_RX_DESC_RING_ADD_CMD";
case WMI_BCAST_DESC_RING_ADD_CMDID:
return "WMI_BCAST_DESC_RING_ADD_CMD";
case WMI_CFG_DEF_RX_OFFLOAD_CMDID:
return "WMI_CFG_DEF_RX_OFFLOAD_CMD";
case WMI_LINK_STATS_CMDID:
return "WMI_LINK_STATS_CMD";
case WMI_SW_TX_REQ_EXT_CMDID:
return "WMI_SW_TX_REQ_EXT_CMDID";
case WMI_FT_AUTH_CMDID:
return "WMI_FT_AUTH_CMD";
case WMI_FT_REASSOC_CMDID:
return "WMI_FT_REASSOC_CMD";
case WMI_UPDATE_FT_IES_CMDID:
return "WMI_UPDATE_FT_IES_CMD";
case WMI_RBUFCAP_CFG_CMDID:
return "WMI_RBUFCAP_CFG_CMD";
case WMI_TEMP_SENSE_ALL_CMDID:
return "WMI_TEMP_SENSE_ALL_CMDID";
case WMI_SET_LINK_MONITOR_CMDID:
return "WMI_SET_LINK_MONITOR_CMD";
default:
return "Untracked CMD";
}
}
static const char *eventid2name(u16 eventid)
{
switch (eventid) {
case WMI_NOTIFY_REQ_DONE_EVENTID:
return "WMI_NOTIFY_REQ_DONE_EVENT";
case WMI_DISCONNECT_EVENTID:
return "WMI_DISCONNECT_EVENT";
case WMI_SW_TX_COMPLETE_EVENTID:
return "WMI_SW_TX_COMPLETE_EVENT";
case WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID:
return "WMI_GET_RF_SECTOR_PARAMS_DONE_EVENT";
case WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID:
return "WMI_SET_RF_SECTOR_PARAMS_DONE_EVENT";
case WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID:
return "WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT";
case WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID:
return "WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT";
case WMI_BRP_SET_ANT_LIMIT_EVENTID:
return "WMI_BRP_SET_ANT_LIMIT_EVENT";
case WMI_FW_READY_EVENTID:
return "WMI_FW_READY_EVENT";
case WMI_TRAFFIC_RESUME_EVENTID:
return "WMI_TRAFFIC_RESUME_EVENT";
case WMI_TOF_GET_TX_RX_OFFSET_EVENTID:
return "WMI_TOF_GET_TX_RX_OFFSET_EVENT";
case WMI_TOF_SET_TX_RX_OFFSET_EVENTID:
return "WMI_TOF_SET_TX_RX_OFFSET_EVENT";
case WMI_VRING_CFG_DONE_EVENTID:
return "WMI_VRING_CFG_DONE_EVENT";
case WMI_READY_EVENTID:
return "WMI_READY_EVENT";
case WMI_RX_MGMT_PACKET_EVENTID:
return "WMI_RX_MGMT_PACKET_EVENT";
case WMI_TX_MGMT_PACKET_EVENTID:
return "WMI_TX_MGMT_PACKET_EVENT";
case WMI_SCAN_COMPLETE_EVENTID:
return "WMI_SCAN_COMPLETE_EVENT";
case WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID:
return "WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT";
case WMI_CONNECT_EVENTID:
return "WMI_CONNECT_EVENT";
case WMI_EAPOL_RX_EVENTID:
return "WMI_EAPOL_RX_EVENT";
case WMI_BA_STATUS_EVENTID:
return "WMI_BA_STATUS_EVENT";
case WMI_RCP_ADDBA_REQ_EVENTID:
return "WMI_RCP_ADDBA_REQ_EVENT";
case WMI_DELBA_EVENTID:
return "WMI_DELBA_EVENT";
case WMI_RING_EN_EVENTID:
return "WMI_RING_EN_EVENT";
case WMI_DATA_PORT_OPEN_EVENTID:
return "WMI_DATA_PORT_OPEN_EVENT";
case WMI_AOA_MEAS_EVENTID:
return "WMI_AOA_MEAS_EVENT";
case WMI_TOF_SESSION_END_EVENTID:
return "WMI_TOF_SESSION_END_EVENT";
case WMI_TOF_GET_CAPABILITIES_EVENTID:
return "WMI_TOF_GET_CAPABILITIES_EVENT";
case WMI_TOF_SET_LCR_EVENTID:
return "WMI_TOF_SET_LCR_EVENT";
case WMI_TOF_SET_LCI_EVENTID:
return "WMI_TOF_SET_LCI_EVENT";
case WMI_TOF_FTM_PER_DEST_RES_EVENTID:
return "WMI_TOF_FTM_PER_DEST_RES_EVENT";
case WMI_TOF_CHANNEL_INFO_EVENTID:
return "WMI_TOF_CHANNEL_INFO_EVENT";
case WMI_TRAFFIC_SUSPEND_EVENTID:
return "WMI_TRAFFIC_SUSPEND_EVENT";
case WMI_ECHO_RSP_EVENTID:
return "WMI_ECHO_RSP_EVENT";
case WMI_LED_CFG_DONE_EVENTID:
return "WMI_LED_CFG_DONE_EVENT";
case WMI_PCP_STARTED_EVENTID:
return "WMI_PCP_STARTED_EVENT";
case WMI_PCP_STOPPED_EVENTID:
return "WMI_PCP_STOPPED_EVENT";
case WMI_GET_SSID_EVENTID:
return "WMI_GET_SSID_EVENT";
case WMI_GET_PCP_CHANNEL_EVENTID:
return "WMI_GET_PCP_CHANNEL_EVENT";
case WMI_P2P_CFG_DONE_EVENTID:
return "WMI_P2P_CFG_DONE_EVENT";
case WMI_PORT_ALLOCATED_EVENTID:
return "WMI_PORT_ALLOCATED_EVENT";
case WMI_PORT_DELETED_EVENTID:
return "WMI_PORT_DELETED_EVENT";
case WMI_LISTEN_STARTED_EVENTID:
return "WMI_LISTEN_STARTED_EVENT";
case WMI_SEARCH_STARTED_EVENTID:
return "WMI_SEARCH_STARTED_EVENT";
case WMI_DISCOVERY_STOPPED_EVENTID:
return "WMI_DISCOVERY_STOPPED_EVENT";
case WMI_CFG_RX_CHAIN_DONE_EVENTID:
return "WMI_CFG_RX_CHAIN_DONE_EVENT";
case WMI_TEMP_SENSE_DONE_EVENTID:
return "WMI_TEMP_SENSE_DONE_EVENT";
case WMI_RCP_ADDBA_RESP_SENT_EVENTID:
return "WMI_RCP_ADDBA_RESP_SENT_EVENT";
case WMI_PS_DEV_PROFILE_CFG_EVENTID:
return "WMI_PS_DEV_PROFILE_CFG_EVENT";
case WMI_SET_MGMT_RETRY_LIMIT_EVENTID:
return "WMI_SET_MGMT_RETRY_LIMIT_EVENT";
case WMI_GET_MGMT_RETRY_LIMIT_EVENTID:
return "WMI_GET_MGMT_RETRY_LIMIT_EVENT";
case WMI_SET_THERMAL_THROTTLING_CFG_EVENTID:
return "WMI_SET_THERMAL_THROTTLING_CFG_EVENT";
case WMI_GET_THERMAL_THROTTLING_CFG_EVENTID:
return "WMI_GET_THERMAL_THROTTLING_CFG_EVENT";
case WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID:
return "WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENT";
case WMI_LO_POWER_CALIB_FROM_OTP_EVENTID:
return "WMI_LO_POWER_CALIB_FROM_OTP_EVENT";
case WMI_START_SCHED_SCAN_EVENTID:
return "WMI_START_SCHED_SCAN_EVENT";
case WMI_STOP_SCHED_SCAN_EVENTID:
return "WMI_STOP_SCHED_SCAN_EVENT";
case WMI_SCHED_SCAN_RESULT_EVENTID:
return "WMI_SCHED_SCAN_RESULT_EVENT";
case WMI_TX_STATUS_RING_CFG_DONE_EVENTID:
return "WMI_TX_STATUS_RING_CFG_DONE_EVENT";
case WMI_RX_STATUS_RING_CFG_DONE_EVENTID:
return "WMI_RX_STATUS_RING_CFG_DONE_EVENT";
case WMI_TX_DESC_RING_CFG_DONE_EVENTID:
return "WMI_TX_DESC_RING_CFG_DONE_EVENT";
case WMI_RX_DESC_RING_CFG_DONE_EVENTID:
return "WMI_RX_DESC_RING_CFG_DONE_EVENT";
case WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID:
return "WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENT";
case WMI_LINK_STATS_CONFIG_DONE_EVENTID:
return "WMI_LINK_STATS_CONFIG_DONE_EVENT";
case WMI_LINK_STATS_EVENTID:
return "WMI_LINK_STATS_EVENT";
case WMI_COMMAND_NOT_SUPPORTED_EVENTID:
return "WMI_COMMAND_NOT_SUPPORTED_EVENT";
case WMI_FT_AUTH_STATUS_EVENTID:
return "WMI_FT_AUTH_STATUS_EVENT";
case WMI_FT_REASSOC_STATUS_EVENTID:
return "WMI_FT_REASSOC_STATUS_EVENT";
case WMI_RBUFCAP_CFG_EVENTID:
return "WMI_RBUFCAP_CFG_EVENT";
case WMI_TEMP_SENSE_ALL_DONE_EVENTID:
return "WMI_TEMP_SENSE_ALL_DONE_EVENTID";
case WMI_SET_LINK_MONITOR_EVENTID:
return "WMI_SET_LINK_MONITOR_EVENT";
case WMI_LINK_MONITOR_EVENTID:
return "WMI_LINK_MONITOR_EVENT";
default:
return "Untracked EVENT";
}
}
static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, u8 mid,
void *buf, u16 len)
{
struct {
struct wil6210_mbox_hdr hdr;
struct wmi_cmd_hdr wmi;
} __packed cmd = {
.hdr = {
.type = WIL_MBOX_HDR_TYPE_WMI,
.flags = 0,
.len = cpu_to_le16(sizeof(cmd.wmi) + len),
},
.wmi = {
.mid = mid,
.command_id = cpu_to_le16(cmdid),
},
};
struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
struct wil6210_mbox_ring_desc d_head;
u32 next_head;
void __iomem *dst;
void __iomem *head = wmi_addr(wil, r->head);
uint retry;
int rc = 0;
if (len > r->entry_size - sizeof(cmd)) {
wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
(int)(sizeof(cmd) + len), r->entry_size);
return -ERANGE;
}
might_sleep();
if (!test_bit(wil_status_fwready, wil->status)) {
wil_err(wil, "WMI: cannot send command while FW not ready\n");
return -EAGAIN;
}
/* Allow sending only suspend / resume commands during suspend flow */
if ((test_bit(wil_status_suspending, wil->status) ||
test_bit(wil_status_suspended, wil->status) ||
test_bit(wil_status_resuming, wil->status)) &&
((cmdid != WMI_TRAFFIC_SUSPEND_CMDID) &&
(cmdid != WMI_TRAFFIC_RESUME_CMDID))) {
wil_err(wil, "WMI: reject send_command during suspend\n");
return -EINVAL;
}
if (!head) {
wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head);
return -EINVAL;
}
wil_halp_vote(wil);
/* read Tx head till it is not busy */
for (retry = 5; retry > 0; retry--) {
wil_memcpy_fromio_32(&d_head, head, sizeof(d_head));
if (d_head.sync == 0)
break;
msleep(20);
}
if (d_head.sync != 0) {
wil_err(wil, "WMI head busy\n");
rc = -EBUSY;
goto out;
}
/* next head */
next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size);
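/* Worked example of the modular advance above, with hypothetical ring
 * geometry: base = 0x1000, size = 0x40, sizeof(d_head) = 0x10. A head of
 * 0x1030 becomes 0x1000 + ((0x1030 - 0x1000 + 0x10) % 0x40) = 0x1000,
 * i.e. the head wraps back to the ring base instead of running past
 * base + size.
 */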
wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
/* wait until the FW finishes the previous command */
for (retry = 5; retry > 0; retry--) {
if (!test_bit(wil_status_fwready, wil->status)) {
wil_err(wil, "WMI: cannot send command while FW not ready\n");
rc = -EAGAIN;
goto out;
}
r->tail = wil_r(wil, RGF_MBOX +
offsetof(struct wil6210_mbox_ctl, tx.tail));
if (next_head != r->tail)
break;
msleep(20);
}
if (next_head == r->tail) {
wil_err(wil, "WMI ring full\n");
rc = -EBUSY;
goto out;
}
dst = wmi_buffer(wil, d_head.addr);
if (!dst) {
wil_err(wil, "invalid WMI buffer: 0x%08x\n",
le32_to_cpu(d_head.addr));
rc = -EAGAIN;
goto out;
}
cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
/* set command */
wil_dbg_wmi(wil, "sending %s (0x%04x) [%d] mid %d\n",
cmdid2name(cmdid), cmdid, len, mid);
wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
sizeof(cmd), true);
wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
len, true);
wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
/* mark entry as full */
wil_w(wil, r->head + offsetof(struct wil6210_mbox_ring_desc, sync), 1);
/* advance next ptr */
wil_w(wil, RGF_MBOX + offsetof(struct wil6210_mbox_ctl, tx.head),
r->head = next_head);
trace_wil6210_wmi_cmd(&cmd.wmi, buf, len);
/* interrupt to FW */
wil_w(wil, RGF_USER_USER_ICR + offsetof(struct RGF_ICR, ICS),
SW_INT_MBOX);
out:
wil_halp_unvote(wil);
return rc;
}
int wmi_send(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len)
{
int rc;
mutex_lock(&wil->wmi_mutex);
rc = __wmi_send(wil, cmdid, mid, buf, len);
mutex_unlock(&wil->wmi_mutex);
return rc;
}
/*=== Event handlers ===*/
static void wmi_evt_ready(struct wil6210_vif *vif, int id, void *d, int len)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wiphy *wiphy = wil_to_wiphy(wil);
struct wmi_ready_event *evt = d;
u8 fw_max_assoc_sta;
wil_info(wil, "FW ver. %s(SW %d); MAC %pM; %d MID's\n",
wil->fw_version, le32_to_cpu(evt->sw_version),
evt->mac, evt->numof_additional_mids);
if (evt->numof_additional_mids + 1 < wil->max_vifs) {
wil_err(wil, "FW does not support enough MIDs (need %d)",
wil->max_vifs - 1);
return; /* FW load will fail after timeout */
}
/* ignore MAC address, we already have it from the boot loader */
strscpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
if (len > offsetof(struct wmi_ready_event, rfc_read_calib_result)) {
wil_dbg_wmi(wil, "rfc calibration result %d\n",
evt->rfc_read_calib_result);
wil->fw_calib_result = evt->rfc_read_calib_result;
}
fw_max_assoc_sta = WIL6210_RX_DESC_MAX_CID;
if (len > offsetof(struct wmi_ready_event, max_assoc_sta) &&
evt->max_assoc_sta > 0) {
fw_max_assoc_sta = evt->max_assoc_sta;
wil_dbg_wmi(wil, "fw reported max assoc sta %d\n",
fw_max_assoc_sta);
if (fw_max_assoc_sta > WIL6210_MAX_CID) {
wil_dbg_wmi(wil,
"fw max assoc sta %d exceeds max driver supported %d\n",
fw_max_assoc_sta, WIL6210_MAX_CID);
fw_max_assoc_sta = WIL6210_MAX_CID;
}
}
wil->max_assoc_sta = min_t(uint, max_assoc_sta, fw_max_assoc_sta);
wil_dbg_wmi(wil, "setting max assoc sta to %d\n", wil->max_assoc_sta);
wil_set_recovery_state(wil, fw_recovery_idle);
set_bit(wil_status_fwready, wil->status);
/* let the reset sequence continue */
complete(&wil->wmi_ready);
}
static void wmi_evt_rx_mgmt(struct wil6210_vif *vif, int id, void *d, int len)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wmi_rx_mgmt_packet_event *data = d;
struct wiphy *wiphy = wil_to_wiphy(wil);
struct ieee80211_mgmt *rx_mgmt_frame =
(struct ieee80211_mgmt *)data->payload;
int flen = len - offsetof(struct wmi_rx_mgmt_packet_event, payload);
int ch_no;
u32 freq;
struct ieee80211_channel *channel;
s32 signal;
__le16 fc;
u32 d_len;
u16 d_status;
if (flen < 0) {
wil_err(wil, "MGMT Rx: short event, len %d\n", len);
return;
}
d_len = le32_to_cpu(data->info.len);
if (d_len != flen) {
wil_err(wil,
"MGMT Rx: length mismatch, d_len %d should be %d\n",
d_len, flen);
return;
}
ch_no = data->info.channel + 1;
freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ);
channel = ieee80211_get_channel(wiphy, freq);
if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
signal = 100 * data->info.rssi;
else
signal = data->info.sqi;
d_status = le16_to_cpu(data->info.status);
fc = rx_mgmt_frame->frame_control;
wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %s RSSI %d SQI %d%%\n",
data->info.channel, WIL_EXTENDED_MCS_CHECK(data->info.mcs),
data->info.rssi, data->info.sqi);
wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
le16_to_cpu(fc));
wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
data->info.qid, data->info.mid, data->info.cid);
wil_hex_dump_wmi("MGMT Rx ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,
d_len, true);
if (!channel) {
wil_err(wil, "Frame on unsupported channel\n");
return;
}
if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) {
struct cfg80211_bss *bss;
struct cfg80211_inform_bss bss_data = {
.chan = channel,
.scan_width = NL80211_BSS_CHAN_WIDTH_20,
.signal = signal,
.boottime_ns = ktime_to_ns(ktime_get_boottime()),
};
u64 tsf = le64_to_cpu(rx_mgmt_frame->u.beacon.timestamp);
u16 cap = le16_to_cpu(rx_mgmt_frame->u.beacon.capab_info);
u16 bi = le16_to_cpu(rx_mgmt_frame->u.beacon.beacon_int);
const u8 *ie_buf = rx_mgmt_frame->u.beacon.variable;
size_t ie_len = d_len - offsetof(struct ieee80211_mgmt,
u.beacon.variable);
wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap);
wil_dbg_wmi(wil, "TSF : 0x%016llx\n", tsf);
wil_dbg_wmi(wil, "Beacon interval : %d\n", bi);
wil_hex_dump_wmi("IE ", DUMP_PREFIX_OFFSET, 16, 1, ie_buf,
ie_len, true);
wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap);
bss = cfg80211_inform_bss_frame_data(wiphy, &bss_data,
rx_mgmt_frame,
d_len, GFP_KERNEL);
if (bss) {
wil_dbg_wmi(wil, "Added BSS %pM\n",
rx_mgmt_frame->bssid);
cfg80211_put_bss(wiphy, bss);
} else {
wil_err(wil, "cfg80211_inform_bss_frame() failed\n");
}
} else {
mutex_lock(&wil->vif_mutex);
cfg80211_rx_mgmt(vif_to_radio_wdev(wil, vif), freq, signal,
(void *)rx_mgmt_frame, d_len, 0);
mutex_unlock(&wil->vif_mutex);
}
}
static void wmi_evt_tx_mgmt(struct wil6210_vif *vif, int id, void *d, int len)
{
struct wmi_tx_mgmt_packet_event *data = d;
struct ieee80211_mgmt *mgmt_frame =
(struct ieee80211_mgmt *)data->payload;
int flen = len - offsetof(struct wmi_tx_mgmt_packet_event, payload);
wil_hex_dump_wmi("MGMT Tx ", DUMP_PREFIX_OFFSET, 16, 1, mgmt_frame,
flen, true);
}
static void wmi_evt_scan_complete(struct wil6210_vif *vif, int id,
void *d, int len)
{
struct wil6210_priv *wil = vif_to_wil(vif);
mutex_lock(&wil->vif_mutex);
if (vif->scan_request) {
struct wmi_scan_complete_event *data = d;
int status = le32_to_cpu(data->status);
struct cfg80211_scan_info info = {
.aborted = ((status != WMI_SCAN_SUCCESS) &&
(status != WMI_SCAN_ABORT_REJECTED)),
};
wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", status);
wil_dbg_misc(wil, "Complete scan_request 0x%p aborted %d\n",
vif->scan_request, info.aborted);
del_timer_sync(&vif->scan_timer);
cfg80211_scan_done(vif->scan_request, &info);
if (vif->mid == 0)
wil->radio_wdev = wil->main_ndev->ieee80211_ptr;
vif->scan_request = NULL;
wake_up_interruptible(&wil->wq);
if (vif->p2p.pending_listen_wdev) {
wil_dbg_misc(wil, "Scheduling delayed listen\n");
schedule_work(&vif->p2p.delayed_listen_work);
}
} else {
wil_err(wil, "SCAN_COMPLETE while not scanning\n");
}
mutex_unlock(&wil->vif_mutex);
}
static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct net_device *ndev = vif_to_ndev(vif);
struct wireless_dev *wdev = vif_to_wdev(vif);
struct wmi_connect_event *evt = d;
int ch; /* channel number */
struct station_info *sinfo;
u8 *assoc_req_ie, *assoc_resp_ie;
size_t assoc_req_ielen, assoc_resp_ielen;
/* capinfo(u16) + listen_interval(u16) + IEs */
const size_t assoc_req_ie_offset = sizeof(u16) * 2;
/* capinfo(u16) + status_code(u16) + associd(u16) + IEs */
const size_t assoc_resp_ie_offset = sizeof(u16) * 3;
int rc;
if (len < sizeof(*evt)) {
wil_err(wil, "Connect event too short : %d bytes\n", len);
return;
}
if (len != sizeof(*evt) + evt->beacon_ie_len + evt->assoc_req_len +
evt->assoc_resp_len) {
wil_err(wil,
"Connect event corrupted : %d != %d + %d + %d + %d\n",
len, (int)sizeof(*evt), evt->beacon_ie_len,
evt->assoc_req_len, evt->assoc_resp_len);
return;
}
if (evt->cid >= wil->max_assoc_sta) {
wil_err(wil, "Connect CID invalid : %d\n", evt->cid);
return;
}
ch = evt->channel + 1;
wil_info(wil, "Connect %pM channel [%d] cid %d aid %d\n",
evt->bssid, ch, evt->cid, evt->aid);
wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1,
evt->assoc_info, len - sizeof(*evt), true);
/* figure out IEs */
assoc_req_ie = &evt->assoc_info[evt->beacon_ie_len +
assoc_req_ie_offset];
assoc_req_ielen = evt->assoc_req_len - assoc_req_ie_offset;
if (evt->assoc_req_len <= assoc_req_ie_offset) {
assoc_req_ie = NULL;
assoc_req_ielen = 0;
}
assoc_resp_ie = &evt->assoc_info[evt->beacon_ie_len +
evt->assoc_req_len +
assoc_resp_ie_offset];
assoc_resp_ielen = evt->assoc_resp_len - assoc_resp_ie_offset;
if (evt->assoc_resp_len <= assoc_resp_ie_offset) {
assoc_resp_ie = NULL;
assoc_resp_ielen = 0;
}
if (test_bit(wil_status_resetting, wil->status) ||
!test_bit(wil_status_fwready, wil->status)) {
wil_err(wil, "status_resetting, cancel connect event, CID %d\n",
evt->cid);
/* no need for cleanup, wil_reset will do that */
return;
}
mutex_lock(&wil->mutex);
if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
(wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
if (!test_bit(wil_vif_fwconnecting, vif->status)) {
wil_err(wil, "Not in connecting state\n");
mutex_unlock(&wil->mutex);
return;
}
del_timer_sync(&vif->connect_timer);
} else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
(wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
if (wil->sta[evt->cid].status != wil_sta_unused) {
wil_err(wil, "AP: Invalid status %d for CID %d\n",
wil->sta[evt->cid].status, evt->cid);
mutex_unlock(&wil->mutex);
return;
}
}
ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid);
wil->sta[evt->cid].mid = vif->mid;
wil->sta[evt->cid].status = wil_sta_conn_pending;
rc = wil_ring_init_tx(vif, evt->cid);
if (rc) {
wil_err(wil, "config tx vring failed for CID %d, rc (%d)\n",
evt->cid, rc);
wmi_disconnect_sta(vif, wil->sta[evt->cid].addr,
WLAN_REASON_UNSPECIFIED, false);
} else {
wil_info(wil, "successful connection to CID %d\n", evt->cid);
}
if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
(wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
if (rc) {
netif_carrier_off(ndev);
wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
wil_err(wil, "cfg80211_connect_result with failure\n");
cfg80211_connect_result(ndev, evt->bssid, NULL, 0,
NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
GFP_KERNEL);
goto out;
} else {
struct wiphy *wiphy = wil_to_wiphy(wil);
cfg80211_ref_bss(wiphy, vif->bss);
cfg80211_connect_bss(ndev, evt->bssid, vif->bss,
assoc_req_ie, assoc_req_ielen,
assoc_resp_ie, assoc_resp_ielen,
WLAN_STATUS_SUCCESS, GFP_KERNEL,
NL80211_TIMEOUT_UNSPECIFIED);
}
vif->bss = NULL;
} else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
(wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
if (rc) {
if (disable_ap_sme)
/* notify new_sta has failed */
cfg80211_del_sta(ndev, evt->bssid, GFP_KERNEL);
goto out;
}
sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
if (!sinfo) {
rc = -ENOMEM;
goto out;
}
sinfo->generation = wil->sinfo_gen++;
if (assoc_req_ie) {
sinfo->assoc_req_ies = assoc_req_ie;
sinfo->assoc_req_ies_len = assoc_req_ielen;
}
cfg80211_new_sta(ndev, evt->bssid, sinfo, GFP_KERNEL);
kfree(sinfo);
} else {
wil_err(wil, "unhandled iftype %d for CID %d\n", wdev->iftype,
evt->cid);
goto out;
}
wil->sta[evt->cid].status = wil_sta_connected;
wil->sta[evt->cid].aid = evt->aid;
if (!test_and_set_bit(wil_vif_fwconnected, vif->status))
atomic_inc(&wil->connected_vifs);
wil_update_net_queues_bh(wil, vif, NULL, false);
out:
if (rc) {
wil->sta[evt->cid].status = wil_sta_unused;
wil->sta[evt->cid].mid = U8_MAX;
}
clear_bit(wil_vif_fwconnecting, vif->status);
mutex_unlock(&wil->mutex);
}
static void wmi_evt_disconnect(struct wil6210_vif *vif, int id,
void *d, int len)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wmi_disconnect_event *evt = d;
u16 reason_code = le16_to_cpu(evt->protocol_reason_status);
wil_info(wil, "Disconnect %pM reason [proto %d wmi %d]\n",
evt->bssid, reason_code, evt->disconnect_reason);
wil->sinfo_gen++;
if (test_bit(wil_status_resetting, wil->status) ||
!test_bit(wil_status_fwready, wil->status)) {
wil_err(wil, "status_resetting, cancel disconnect event\n");
/* no need for cleanup, wil_reset will do that */
return;
}
mutex_lock(&wil->mutex);
wil6210_disconnect_complete(vif, evt->bssid, reason_code);
if (disable_ap_sme) {
struct wireless_dev *wdev = vif_to_wdev(vif);
struct net_device *ndev = vif_to_ndev(vif);
/* disconnect event in disable_ap_sme mode means link loss */
switch (wdev->iftype) {
/* AP-like interface */
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
/* notify hostapd about link loss */
cfg80211_cqm_pktloss_notify(ndev, evt->bssid, 0,
GFP_KERNEL);
break;
default:
break;
}
}
mutex_unlock(&wil->mutex);
}
/*
 * The firmware reports an EAPOL frame using a WMI event.
 * Reconstruct the Ethernet frame and deliver it via the normal Rx path.
 */
static void wmi_evt_eapol_rx(struct wil6210_vif *vif, int id, void *d, int len)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct net_device *ndev = vif_to_ndev(vif);
struct wmi_eapol_rx_event *evt = d;
u16 eapol_len = le16_to_cpu(evt->eapol_len);
int sz = eapol_len + ETH_HLEN;
struct sk_buff *skb;
struct ethhdr *eth;
int cid;
struct wil_net_stats *stats = NULL;
wil_dbg_wmi(wil, "EAPOL len %d from %pM MID %d\n", eapol_len,
evt->src_mac, vif->mid);
cid = wil_find_cid(wil, vif->mid, evt->src_mac);
if (cid >= 0)
stats = &wil->sta[cid].stats;
if (eapol_len > 196) { /* TODO: revisit size limit */
wil_err(wil, "EAPOL too large\n");
return;
}
skb = alloc_skb(sz, GFP_KERNEL);
if (!skb) {
wil_err(wil, "Failed to allocate skb\n");
return;
}
eth = skb_put(skb, ETH_HLEN);
ether_addr_copy(eth->h_dest, ndev->dev_addr);
ether_addr_copy(eth->h_source, evt->src_mac);
eth->h_proto = cpu_to_be16(ETH_P_PAE);
skb_put_data(skb, evt->eapol, eapol_len);
skb->protocol = eth_type_trans(skb, ndev);
if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += sz;
if (stats) {
stats->rx_packets++;
stats->rx_bytes += sz;
}
} else {
ndev->stats.rx_dropped++;
if (stats)
stats->rx_dropped++;
}
}
static void wmi_evt_ring_en(struct wil6210_vif *vif, int id, void *d, int len)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wmi_ring_en_event *evt = d;
u8 vri = evt->ring_index;
struct wireless_dev *wdev = vif_to_wdev(vif);
struct wil_sta_info *sta;
u8 cid;
struct key_params params;
wil_dbg_wmi(wil, "Enable vring %d MID %d\n", vri, vif->mid);
if (vri >= ARRAY_SIZE(wil->ring_tx)) {
wil_err(wil, "Enable for invalid vring %d\n", vri);
return;
}
if (wdev->iftype != NL80211_IFTYPE_AP || !disable_ap_sme ||
test_bit(wil_vif_ft_roam, vif->status))
/* in AP mode with disable_ap_sme and no FT roam,
 * dot1x_open is set by wil_cfg80211_change_station() instead
 */
wil->ring_tx_data[vri].dot1x_open = true;
if (vri == vif->bcast_ring) /* no BA for bcast */
return;
cid = wil->ring2cid_tid[vri][0];
if (!wil_cid_valid(wil, cid)) {
wil_err(wil, "invalid cid %d for vring %d\n", cid, vri);
return;
}
/* In FT mode we get the key but do not store it, as it arrives
 * before the WMI_CONNECT_EVENT from FW.
 * wil_set_crypto_rx() is called here to reset the security PN
 */
sta = &wil->sta[cid];
if (test_bit(wil_vif_ft_roam, vif->status)) {
memset(&params, 0, sizeof(params));
wil_set_crypto_rx(0, WMI_KEY_USE_PAIRWISE, sta, &params);
if (wdev->iftype != NL80211_IFTYPE_AP)
clear_bit(wil_vif_ft_roam, vif->status);
}
if (agg_wsize >= 0)
wil_addba_tx_request(wil, vri, agg_wsize);
}
static void wmi_evt_ba_status(struct wil6210_vif *vif, int id,
void *d, int len)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wmi_ba_status_event *evt = d;
struct wil_ring_tx_data *txdata;
wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d AMSDU%s\n",
evt->ringid,
evt->status == WMI_BA_AGREED ? "OK" : "N/A",
evt->agg_wsize, __le16_to_cpu(evt->ba_timeout),
evt->amsdu ? "+" : "-");
if (evt->ringid >= WIL6210_MAX_TX_RINGS) {
wil_err(wil, "invalid ring id %d\n", evt->ringid);
return;
}
if (evt->status != WMI_BA_AGREED) {
evt->ba_timeout = 0;
evt->agg_wsize = 0;
evt->amsdu = 0;
}
txdata = &wil->ring_tx_data[evt->ringid];
txdata->agg_timeout = le16_to_cpu(evt->ba_timeout);
txdata->agg_wsize = evt->agg_wsize;
txdata->agg_amsdu = evt->amsdu;
txdata->addba_in_progress = false;
}
static void wmi_evt_addba_rx_req(struct wil6210_vif *vif, int id,
void *d, int len)
{
struct wil6210_priv *wil = vif_to_wil(vif);
u8 cid, tid;
struct wmi_rcp_addba_req_event *evt = d;
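/* Legacy events pack CID and TID into a single cidxtid byte; newer FW
 * sets cidxtid to CIDXTID_EXTENDED_CID_TID and reports them in the
 * dedicated cid/tid fields instead (see parse_cidxtid()).
 */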
if (evt->cidxtid != CIDXTID_EXTENDED_CID_TID) {
parse_cidxtid(evt->cidxtid, &cid, &tid);
} else {
cid = evt->cid;
tid = evt->tid;
}
wil_addba_rx_request(wil, vif->mid, cid, tid, evt->dialog_token,
evt->ba_param_set, evt->ba_timeout,
evt->ba_seq_ctrl);
}
static void wmi_evt_delba(struct wil6210_vif *vif, int id, void *d, int len)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wmi_delba_event *evt = d;
u8 cid, tid;
u16 reason = __le16_to_cpu(evt->reason);
struct wil_sta_info *sta;
struct wil_tid_ampdu_rx *r;
might_sleep();
if (evt->cidxtid != CIDXTID_EXTENDED_CID_TID) {
parse_cidxtid(evt->cidxtid, &cid, &tid);
} else {
cid = evt->cid;
tid = evt->tid;
}
if (!wil_cid_valid(wil, cid)) {
wil_err(wil, "DELBA: Invalid CID %d\n", cid);
return;
}
wil_dbg_wmi(wil, "DELBA MID %d CID %d TID %d from %s reason %d\n",
vif->mid, cid, tid,
evt->from_initiator ? "originator" : "recipient",
reason);
if (!evt->from_initiator) {
int i;
/* find Tx vring it belongs to */
for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
if (wil->ring2cid_tid[i][0] == cid &&
wil->ring2cid_tid[i][1] == tid) {
struct wil_ring_tx_data *txdata =
&wil->ring_tx_data[i];
wil_dbg_wmi(wil, "DELBA Tx vring %d\n", i);
txdata->agg_timeout = 0;
txdata->agg_wsize = 0;
txdata->addba_in_progress = false;
break; /* max. 1 matching ring */
}
}
if (i >= ARRAY_SIZE(wil->ring2cid_tid))
wil_err(wil, "DELBA: unable to find Tx vring\n");
return;
}
sta = &wil->sta[cid];
spin_lock_bh(&sta->tid_rx_lock);
r = sta->tid_rx[tid];
sta->tid_rx[tid] = NULL;
wil_tid_ampdu_rx_free(wil, r);
spin_unlock_bh(&sta->tid_rx_lock);
}
static void
wmi_evt_sched_scan_result(struct wil6210_vif *vif, int id, void *d, int len)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wmi_sched_scan_result_event *data = d;
struct wiphy *wiphy = wil_to_wiphy(wil);
struct ieee80211_mgmt *rx_mgmt_frame =
(struct ieee80211_mgmt *)data->payload;
int flen = len - offsetof(struct wmi_sched_scan_result_event, payload);
int ch_no;
u32 freq;
struct ieee80211_channel *channel;
s32 signal;
__le16 fc;
u32 d_len;
struct cfg80211_bss *bss;
struct cfg80211_inform_bss bss_data = {
.scan_width = NL80211_BSS_CHAN_WIDTH_20,
.boottime_ns = ktime_to_ns(ktime_get_boottime()),
};
if (flen < 0) {
wil_err(wil, "sched scan result event too short, len %d\n",
len);
return;
}
d_len = le32_to_cpu(data->info.len);
if (d_len != flen) {
wil_err(wil,
"sched scan result length mismatch, d_len %d should be %d\n",
d_len, flen);
return;
}
fc = rx_mgmt_frame->frame_control;
if (!ieee80211_is_probe_resp(fc)) {
wil_err(wil, "sched scan result invalid frame, fc 0x%04x\n",
fc);
return;
}
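/* FW reports a 0-based channel index; 60 GHz channel numbers are 1-based */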
ch_no = data->info.channel + 1;
freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ);
channel = ieee80211_get_channel(wiphy, freq);
if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
signal = 100 * data->info.rssi;
else
signal = data->info.sqi;
wil_dbg_wmi(wil, "sched scan result: channel %d MCS %s RSSI %d\n",
data->info.channel, WIL_EXTENDED_MCS_CHECK(data->info.mcs),
data->info.rssi);
wil_dbg_wmi(wil, "len %d qid %d mid %d cid %d\n",
d_len, data->info.qid, data->info.mid, data->info.cid);
wil_hex_dump_wmi("PROBE ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,
d_len, true);
if (!channel) {
wil_err(wil, "Frame on unsupported channel\n");
return;
}
bss_data.signal = signal;
bss_data.chan = channel;
bss = cfg80211_inform_bss_frame_data(wiphy, &bss_data, rx_mgmt_frame,
d_len, GFP_KERNEL);
if (bss) {
wil_dbg_wmi(wil, "Added BSS %pM\n", rx_mgmt_frame->bssid);
cfg80211_put_bss(wiphy, bss);
} else {
wil_err(wil, "cfg80211_inform_bss_frame() failed\n");
}
cfg80211_sched_scan_results(wiphy, 0);
}
static void wil_link_stats_store_basic(struct wil6210_vif *vif,
struct wmi_link_stats_basic *basic)
{
struct wil6210_priv *wil = vif_to_wil(vif);
u8 cid = basic->cid;
struct wil_sta_info *sta;
if (cid >= wil->max_assoc_sta) {
wil_err(wil, "invalid cid %d\n", cid);
return;
}
sta = &wil->sta[cid];
sta->fw_stats_basic = *basic;
}
static void wil_link_stats_store_global(struct wil6210_vif *vif,
struct wmi_link_stats_global *global)
{
struct wil6210_priv *wil = vif_to_wil(vif);
wil->fw_stats_global.stats = *global;
}
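/* The link stats payload parsed below is a sequence of variable-size
 * records, each prefixed by a wmi_link_stats_record header:
 *
 *   | hdr: record_type_id, record_size | record_size payload bytes | ...
 *
 * Basic records are stored per-CID, global records per-device; the
 * tsf/ready markers are only set on the last fragment (!has_next).
 */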
static void wmi_link_stats_parse(struct wil6210_vif *vif, u64 tsf,
bool has_next, void *payload,
size_t payload_size)
{
struct wil6210_priv *wil = vif_to_wil(vif);
size_t hdr_size = sizeof(struct wmi_link_stats_record);
size_t stats_size, record_size, expected_size;
struct wmi_link_stats_record *hdr;
if (payload_size < hdr_size) {
wil_err(wil, "link stats wrong event size %zu\n", payload_size);
return;
}
while (payload_size >= hdr_size) {
hdr = payload;
stats_size = le16_to_cpu(hdr->record_size);
record_size = hdr_size + stats_size;
if (payload_size < record_size) {
wil_err(wil, "link stats payload ended unexpectedly, size %zu < %zu\n",
payload_size, record_size);
return;
}
switch (hdr->record_type_id) {
case WMI_LINK_STATS_TYPE_BASIC:
expected_size = sizeof(struct wmi_link_stats_basic);
if (stats_size < expected_size) {
wil_err(wil, "link stats invalid basic record size %zu < %zu\n",
stats_size, expected_size);
return;
}
if (vif->fw_stats_ready) {
/* clean old statistics */
vif->fw_stats_tsf = 0;
vif->fw_stats_ready = false;
}
wil_link_stats_store_basic(vif, payload + hdr_size);
if (!has_next) {
vif->fw_stats_tsf = tsf;
vif->fw_stats_ready = true;
}
break;
case WMI_LINK_STATS_TYPE_GLOBAL:
expected_size = sizeof(struct wmi_link_stats_global);
if (stats_size < expected_size) {
wil_err(wil, "link stats invalid global record size %zu < %zu\n",
stats_size, expected_size);
return;
}
if (wil->fw_stats_global.ready) {
/* clean old statistics */
wil->fw_stats_global.tsf = 0;
wil->fw_stats_global.ready = false;
}
wil_link_stats_store_global(vif, payload + hdr_size);
if (!has_next) {
wil->fw_stats_global.tsf = tsf;
wil->fw_stats_global.ready = true;
}
break;
default:
break;
}
/* skip to next record */
payload += record_size;
payload_size -= record_size;
}
}
static void
wmi_evt_link_stats(struct wil6210_vif *vif, int id, void *d, int len)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wmi_link_stats_event *evt = d;
size_t payload_size;
if (len < offsetof(struct wmi_link_stats_event, payload)) {
wil_err(wil, "stats event way too short %d\n", len);
return;
}
payload_size = le16_to_cpu(evt->payload_size);
if (len < sizeof(struct wmi_link_stats_event) + payload_size) {
wil_err(wil, "stats event too short %d\n", len);
return;
}
wmi_link_stats_parse(vif, le64_to_cpu(evt->tsf), evt->has_next,
evt->payload, payload_size);
}
/* find cid and ringid for the station vif
*
* return error, if other interfaces are used or ring was not found
*/
static int wil_find_cid_ringid_sta(struct wil6210_priv *wil,
struct wil6210_vif *vif,
int *cid,
int *ringid)
{
struct wil_ring *ring;
struct wil_ring_tx_data *txdata;
int min_ring_id = wil_get_min_tx_ring_id(wil);
int i;
u8 lcid;
if (!(vif->wdev.iftype == NL80211_IFTYPE_STATION ||
vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)) {
wil_err(wil, "invalid interface type %d\n", vif->wdev.iftype);
return -EINVAL;
}
/* In STA mode, only one Tx ring is expected, toward the AP
 * we are connected to.
 * Find it and return the CID associated with it.
 */
for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
ring = &wil->ring_tx[i];
txdata = &wil->ring_tx_data[i];
if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
continue;
lcid = wil->ring2cid_tid[i][0];
if (lcid >= wil->max_assoc_sta) /* skip BCAST */
continue;
wil_dbg_wmi(wil, "find sta -> ringid %d cid %d\n", i, lcid);
*cid = lcid;
*ringid = i;
return 0;
}
wil_dbg_wmi(wil, "find sta cid while no rings active?\n");
return -ENOENT;
}
static void
wmi_evt_auth_status(struct wil6210_vif *vif, int id, void *d, int len)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct net_device *ndev = vif_to_ndev(vif);
struct wmi_ft_auth_status_event *data = d;
int ie_len = len - offsetof(struct wmi_ft_auth_status_event, ie_info);
int rc, cid = 0, ringid = 0;
struct cfg80211_ft_event_params ft;
u16 d_len;
/* auth_alg(u16) + auth_transaction(u16) + status_code(u16) */
const size_t auth_ie_offset = sizeof(u16) * 3;
struct auth_no_hdr *auth = (struct auth_no_hdr *)data->ie_info;
/* check the status */
if (ie_len >= 0 && data->status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "FT: auth failed. status %d\n", data->status);
goto fail;
}
if (ie_len < auth_ie_offset) {
wil_err(wil, "FT: auth event too short, len %d\n", len);
goto fail;
}
d_len = le16_to_cpu(data->ie_len);
if (d_len != ie_len) {
wil_err(wil,
"FT: auth ie length mismatch, d_len %d should be %d\n",
d_len, ie_len);
goto fail;
}
if (!test_bit(wil_vif_ft_roam, vif->status)) {
wil_err(wil, "FT: Not in roaming state\n");
goto fail;
}
if (le16_to_cpu(auth->auth_transaction) != 2) {
wil_err(wil, "FT: auth error. auth_transaction %d\n",
le16_to_cpu(auth->auth_transaction));
goto fail;
}
if (le16_to_cpu(auth->auth_alg) != WLAN_AUTH_FT) {
wil_err(wil, "FT: auth error. auth_alg %d\n",
le16_to_cpu(auth->auth_alg));
goto fail;
}
wil_dbg_wmi(wil, "FT: Auth to %pM successfully\n", data->mac_addr);
wil_hex_dump_wmi("FT Auth ies : ", DUMP_PREFIX_OFFSET, 16, 1,
data->ie_info, d_len, true);
/* find cid and ringid */
rc = wil_find_cid_ringid_sta(wil, vif, &cid, &ringid);
if (rc) {
wil_err(wil, "No valid cid found\n");
goto fail;
}
if (vif->privacy) {
/* For secure assoc, remove old keys */
rc = wmi_del_cipher_key(vif, 0, wil->sta[cid].addr,
WMI_KEY_USE_PAIRWISE);
if (rc) {
wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(PTK) failed\n");
goto fail;
}
rc = wmi_del_cipher_key(vif, 0, wil->sta[cid].addr,
WMI_KEY_USE_RX_GROUP);
if (rc) {
wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(GTK) failed\n");
goto fail;
}
}
memset(&ft, 0, sizeof(ft));
ft.ies = data->ie_info + auth_ie_offset;
ft.ies_len = d_len - auth_ie_offset;
ft.target_ap = data->mac_addr;
cfg80211_ft_event(ndev, &ft);
return;
fail:
wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID);
}
static void
wmi_evt_reassoc_status(struct wil6210_vif *vif, int id, void *d, int len)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct net_device *ndev = vif_to_ndev(vif);
struct wiphy *wiphy = wil_to_wiphy(wil);
struct wmi_ft_reassoc_status_event *data = d;
int ies_len = len - offsetof(struct wmi_ft_reassoc_status_event,
ie_info);
int rc = -ENOENT, cid = 0, ringid = 0;
int ch; /* channel number (primary) */
size_t assoc_req_ie_len = 0, assoc_resp_ie_len = 0;
u8 *assoc_req_ie = NULL, *assoc_resp_ie = NULL;
/* capinfo(u16) + listen_interval(u16) + current_ap mac addr + IEs */
const size_t assoc_req_ie_offset = sizeof(u16) * 2 + ETH_ALEN;
/* capinfo(u16) + status_code(u16) + associd(u16) + IEs */
const size_t assoc_resp_ie_offset = sizeof(u16) * 3;
u16 d_len;
int freq;
struct cfg80211_roam_info info;
if (ies_len < 0) {
wil_err(wil, "ft reassoc event too short, len %d\n", len);
goto fail;
}
wil_dbg_wmi(wil, "Reasoc Status event: status=%d, aid=%d",
data->status, data->aid);
wil_dbg_wmi(wil, " mac_addr=%pM, beacon_ie_len=%d",
data->mac_addr, data->beacon_ie_len);
wil_dbg_wmi(wil, " reassoc_req_ie_len=%d, reassoc_resp_ie_len=%d",
le16_to_cpu(data->reassoc_req_ie_len),
le16_to_cpu(data->reassoc_resp_ie_len));
d_len = le16_to_cpu(data->beacon_ie_len) +
le16_to_cpu(data->reassoc_req_ie_len) +
le16_to_cpu(data->reassoc_resp_ie_len);
if (d_len != ies_len) {
wil_err(wil,
"ft reassoc ie length mismatch, d_len %d should be %d\n",
d_len, ies_len);
goto fail;
}
/* check the status */
if (data->status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "ft reassoc failed. status %d\n", data->status);
goto fail;
}
/* find cid and ringid */
rc = wil_find_cid_ringid_sta(wil, vif, &cid, &ringid);
if (rc) {
wil_err(wil, "No valid cid found\n");
goto fail;
}
ch = data->channel + 1;
wil_info(wil, "FT: Roam %pM channel [%d] cid %d aid %d\n",
data->mac_addr, ch, cid, data->aid);
wil_hex_dump_wmi("reassoc AI : ", DUMP_PREFIX_OFFSET, 16, 1,
data->ie_info, len - sizeof(*data), true);
/* figure out IE's */
if (le16_to_cpu(data->reassoc_req_ie_len) > assoc_req_ie_offset) {
assoc_req_ie = &data->ie_info[assoc_req_ie_offset];
assoc_req_ie_len = le16_to_cpu(data->reassoc_req_ie_len) -
assoc_req_ie_offset;
}
if (le16_to_cpu(data->reassoc_resp_ie_len) <= assoc_resp_ie_offset) {
wil_err(wil, "FT: reassoc resp ie len is too short, len %d\n",
le16_to_cpu(data->reassoc_resp_ie_len));
goto fail;
}
assoc_resp_ie = &data->ie_info[le16_to_cpu(data->reassoc_req_ie_len) +
assoc_resp_ie_offset];
assoc_resp_ie_len = le16_to_cpu(data->reassoc_resp_ie_len) -
assoc_resp_ie_offset;
if (test_bit(wil_status_resetting, wil->status) ||
!test_bit(wil_status_fwready, wil->status)) {
wil_err(wil, "FT: status_resetting, cancel reassoc event\n");
/* no need for cleanup, wil_reset will do that */
return;
}
mutex_lock(&wil->mutex);
/* ring modify to set the ring for the roamed AP settings */
wil_dbg_wmi(wil,
"ft modify tx config for connection CID %d ring %d\n",
cid, ringid);
rc = wil->txrx_ops.tx_ring_modify(vif, ringid, cid, 0);
if (rc) {
wil_err(wil, "modify TX for CID %d MID %d ring %d failed (%d)\n",
cid, vif->mid, ringid, rc);
mutex_unlock(&wil->mutex);
goto fail;
}
/* Update the driver STA members with the new bss */
wil->sta[cid].aid = data->aid;
wil->sta[cid].stats.ft_roams++;
ether_addr_copy(wil->sta[cid].addr, vif->bss->bssid);
mutex_unlock(&wil->mutex);
del_timer_sync(&vif->connect_timer);
cfg80211_ref_bss(wiphy, vif->bss);
freq = ieee80211_channel_to_frequency(ch, NL80211_BAND_60GHZ);
memset(&info, 0, sizeof(info));
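/* non-MLO driver: only link 0 of the roam info is used */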
info.links[0].channel = ieee80211_get_channel(wiphy, freq);
info.links[0].bss = vif->bss;
info.req_ie = assoc_req_ie;
info.req_ie_len = assoc_req_ie_len;
info.resp_ie = assoc_resp_ie;
info.resp_ie_len = assoc_resp_ie_len;
cfg80211_roamed(ndev, &info, GFP_KERNEL);
vif->bss = NULL;
return;
fail:
wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID);
}
static void
wmi_evt_link_monitor(struct wil6210_vif *vif, int id, void *d, int len)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct net_device *ndev = vif_to_ndev(vif);
struct wmi_link_monitor_event *evt = d;
enum nl80211_cqm_rssi_threshold_event event_type;
if (len < sizeof(*evt)) {
wil_err(wil, "link monitor event too short %d\n", len);
return;
}
wil_dbg_wmi(wil, "link monitor event, type %d rssi %d (stored %d)\n",
evt->type, evt->rssi_level, wil->cqm_rssi_thold);
if (evt->type != WMI_LINK_MONITOR_NOTIF_RSSI_THRESHOLD_EVT)
/* ignore */
return;
event_type = (evt->rssi_level > wil->cqm_rssi_thold ?
NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH :
NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW);
cfg80211_cqm_rssi_notify(ndev, event_type, evt->rssi_level, GFP_KERNEL);
}
/* Some events are intentionally ignored and need not be reported as
 * "unhandled events"
 */
static void wmi_evt_ignore(struct wil6210_vif *vif, int id, void *d, int len)
{
struct wil6210_priv *wil = vif_to_wil(vif);
wil_dbg_wmi(wil, "Ignore event 0x%04x len %d\n", id, len);
}
static const struct {
int eventid;
void (*handler)(struct wil6210_vif *vif,
int eventid, void *data, int data_len);
} wmi_evt_handlers[] = {
{WMI_READY_EVENTID, wmi_evt_ready},
{WMI_FW_READY_EVENTID, wmi_evt_ignore},
{WMI_RX_MGMT_PACKET_EVENTID, wmi_evt_rx_mgmt},
{WMI_TX_MGMT_PACKET_EVENTID, wmi_evt_tx_mgmt},
{WMI_SCAN_COMPLETE_EVENTID, wmi_evt_scan_complete},
{WMI_CONNECT_EVENTID, wmi_evt_connect},
{WMI_DISCONNECT_EVENTID, wmi_evt_disconnect},
{WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx},
{WMI_BA_STATUS_EVENTID, wmi_evt_ba_status},
{WMI_RCP_ADDBA_REQ_EVENTID, wmi_evt_addba_rx_req},
{WMI_DELBA_EVENTID, wmi_evt_delba},
{WMI_RING_EN_EVENTID, wmi_evt_ring_en},
{WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_ignore},
{WMI_SCHED_SCAN_RESULT_EVENTID, wmi_evt_sched_scan_result},
{WMI_LINK_STATS_EVENTID, wmi_evt_link_stats},
{WMI_FT_AUTH_STATUS_EVENTID, wmi_evt_auth_status},
{WMI_FT_REASSOC_STATUS_EVENTID, wmi_evt_reassoc_status},
{WMI_LINK_MONITOR_EVENTID, wmi_evt_link_monitor},
};
/*
* Run in IRQ context
* Extract WMI command from mailbox. Queue it to the @wil->pending_wmi_ev
* that will be eventually handled by the @wmi_event_worker in the thread
* context of thread "wil6210_wmi"
*/
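/* Summary of the per-message loop below: read the head pointer from
 * RGF_MBOX, fetch the descriptor at tail, copy out header and payload,
 * clear the descriptor 'sync' word to hand it back to FW, then advance
 * and publish the tail. A message matching wil->reply_id/reply_mid is
 * copied into the caller's reply buffer and completes wmi_call();
 * anything else is queued for wmi_event_worker.
 */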
void wmi_recv_cmd(struct wil6210_priv *wil)
{
struct wil6210_mbox_ring_desc d_tail;
struct wil6210_mbox_hdr hdr;
struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx;
struct pending_wmi_event *evt;
u8 *cmd;
void __iomem *src;
ulong flags;
unsigned int n;
unsigned int num_immed_reply = 0;
if (!test_bit(wil_status_mbox_ready, wil->status)) {
wil_err(wil, "Reset in progress. Cannot handle WMI event\n");
return;
}
if (test_bit(wil_status_suspended, wil->status)) {
wil_err(wil, "suspended. cannot handle WMI event\n");
return;
}
for (n = 0;; n++) {
u16 len;
bool q;
bool immed_reply = false;
r->head = wil_r(wil, RGF_MBOX +
offsetof(struct wil6210_mbox_ctl, rx.head));
if (r->tail == r->head)
break;
wil_dbg_wmi(wil, "Mbox head %08x tail %08x\n",
r->head, r->tail);
/* read cmd descriptor from tail */
wil_memcpy_fromio_32(&d_tail, wil->csr + HOSTADDR(r->tail),
sizeof(struct wil6210_mbox_ring_desc));
if (d_tail.sync == 0) {
wil_err(wil, "Mbox evt not owned by FW?\n");
break;
}
/* read cmd header from descriptor */
if (0 != wmi_read_hdr(wil, d_tail.addr, &hdr)) {
wil_err(wil, "Mbox evt at 0x%08x?\n",
le32_to_cpu(d_tail.addr));
break;
}
len = le16_to_cpu(hdr.len);
wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n",
le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
hdr.flags);
/* read cmd buffer from descriptor */
src = wmi_buffer(wil, d_tail.addr) +
sizeof(struct wil6210_mbox_hdr);
evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event,
event.wmi) + len, 4),
GFP_KERNEL);
if (!evt)
break;
evt->event.hdr = hdr;
cmd = (void *)&evt->event.wmi;
wil_memcpy_fromio_32(cmd, src, len);
/* mark entry as empty */
wil_w(wil, r->tail +
offsetof(struct wil6210_mbox_ring_desc, sync), 0);
/* indicate */
if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
(len >= sizeof(struct wmi_cmd_hdr))) {
struct wmi_cmd_hdr *wmi = &evt->event.wmi;
u16 id = le16_to_cpu(wmi->command_id);
u8 mid = wmi->mid;
u32 tstamp = le32_to_cpu(wmi->fw_timestamp);
if (test_bit(wil_status_resuming, wil->status)) {
if (id == WMI_TRAFFIC_RESUME_EVENTID)
clear_bit(wil_status_resuming,
wil->status);
else
wil_err(wil,
"WMI evt %d while resuming\n",
id);
}
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
if (wil->reply_id && wil->reply_id == id &&
wil->reply_mid == mid) {
if (wil->reply_buf) {
memcpy(wil->reply_buf, wmi,
min(len, wil->reply_size));
immed_reply = true;
}
if (id == WMI_TRAFFIC_SUSPEND_EVENTID) {
wil_dbg_wmi(wil,
"set suspend_resp_rcvd\n");
wil->suspend_resp_rcvd = true;
}
}
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
wil_dbg_wmi(wil, "recv %s (0x%04x) MID %d @%d msec\n",
eventid2name(id), id, wmi->mid, tstamp);
trace_wil6210_wmi_event(wmi, &wmi[1],
len - sizeof(*wmi));
}
wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1,
&evt->event.hdr, sizeof(hdr) + len, true);
/* advance tail */
r->tail = r->base + ((r->tail - r->base +
sizeof(struct wil6210_mbox_ring_desc)) % r->size);
wil_w(wil, RGF_MBOX +
offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail);
if (immed_reply) {
wil_dbg_wmi(wil, "recv_cmd: Complete WMI 0x%04x\n",
wil->reply_id);
kfree(evt);
num_immed_reply++;
complete(&wil->wmi_call);
} else {
/* add to the pending list */
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
list_add_tail(&evt->list, &wil->pending_wmi_ev);
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
q = queue_work(wil->wmi_wq, &wil->wmi_event_worker);
wil_dbg_wmi(wil, "queue_work -> %d\n", q);
}
}
/* normally, 1 event per IRQ should be processed */
wil_dbg_wmi(wil, "recv_cmd: -> %d events queued, %d completed\n",
n - num_immed_reply, num_immed_reply);
}
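/* wmi_call() is the synchronous request/reply primitive used by most
 * commands below: publish reply_id/reply_mid/reply_buf under
 * wmi_ev_lock, send the command, then sleep on the wmi_call completion
 * that wmi_recv_cmd() (or wmi_event_handle()) fires when the matching
 * event arrives. Illustrative usage, borrowed from wmi_echo() below:
 *
 *   struct wmi_echo_cmd cmd = { .value = cpu_to_le32(0x12345678) };
 *   rc = wmi_call(wil, WMI_ECHO_CMDID, vif->mid, &cmd, sizeof(cmd),
 *                 WMI_ECHO_RSP_EVENTID, NULL, 0,
 *                 WIL_WMI_CALL_GENERAL_TO_MS);
 */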
int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len,
u16 reply_id, void *reply, u16 reply_size, int to_msec)
{
int rc;
unsigned long remain;
ulong flags;
mutex_lock(&wil->wmi_mutex);
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
wil->reply_id = reply_id;
wil->reply_mid = mid;
wil->reply_buf = reply;
wil->reply_size = reply_size;
reinit_completion(&wil->wmi_call);
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
rc = __wmi_send(wil, cmdid, mid, buf, len);
if (rc)
goto out;
remain = wait_for_completion_timeout(&wil->wmi_call,
msecs_to_jiffies(to_msec));
if (0 == remain) {
wil_err(wil, "wmi_call(0x%04x->0x%04x) timeout %d msec\n",
cmdid, reply_id, to_msec);
rc = -ETIME;
} else {
wil_dbg_wmi(wil,
"wmi_call(0x%04x->0x%04x) completed in %d msec\n",
cmdid, reply_id,
to_msec - jiffies_to_msecs(remain));
}
out:
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
wil->reply_id = 0;
wil->reply_mid = U8_MAX;
wil->reply_buf = NULL;
wil->reply_size = 0;
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
mutex_unlock(&wil->wmi_mutex);
return rc;
}
int wmi_echo(struct wil6210_priv *wil)
{
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
struct wmi_echo_cmd cmd = {
.value = cpu_to_le32(0x12345678),
};
return wmi_call(wil, WMI_ECHO_CMDID, vif->mid, &cmd, sizeof(cmd),
WMI_ECHO_RSP_EVENTID, NULL, 0,
WIL_WMI_CALL_GENERAL_TO_MS);
}
int wmi_set_mac_address(struct wil6210_priv *wil, const void *addr)
{
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
struct wmi_set_mac_address_cmd cmd;
ether_addr_copy(cmd.mac, addr);
wil_dbg_wmi(wil, "Set MAC %pM\n", addr);
return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, vif->mid,
&cmd, sizeof(cmd));
}
int wmi_led_cfg(struct wil6210_priv *wil, bool enable)
{
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
int rc = 0;
struct wmi_led_cfg_cmd cmd = {
.led_mode = enable,
.id = led_id,
.slow_blink_cfg.blink_on =
cpu_to_le32(led_blink_time[WIL_LED_TIME_SLOW].on_ms),
.slow_blink_cfg.blink_off =
cpu_to_le32(led_blink_time[WIL_LED_TIME_SLOW].off_ms),
.medium_blink_cfg.blink_on =
cpu_to_le32(led_blink_time[WIL_LED_TIME_MED].on_ms),
.medium_blink_cfg.blink_off =
cpu_to_le32(led_blink_time[WIL_LED_TIME_MED].off_ms),
.fast_blink_cfg.blink_on =
cpu_to_le32(led_blink_time[WIL_LED_TIME_FAST].on_ms),
.fast_blink_cfg.blink_off =
cpu_to_le32(led_blink_time[WIL_LED_TIME_FAST].off_ms),
.led_polarity = led_polarity,
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_led_cfg_done_event evt;
} __packed reply = {
.evt = {.status = cpu_to_le32(WMI_FW_STATUS_FAILURE)},
};
if (led_id == WIL_LED_INVALID_ID)
goto out;
if (led_id > WIL_LED_MAX_ID) {
wil_err(wil, "Invalid led id %d\n", led_id);
rc = -EINVAL;
goto out;
}
wil_dbg_wmi(wil,
"%s led %d\n",
enable ? "enabling" : "disabling", led_id);
rc = wmi_call(wil, WMI_LED_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
WMI_LED_CFG_DONE_EVENTID, &reply, sizeof(reply),
WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
goto out;
if (reply.evt.status) {
wil_err(wil, "led %d cfg failed with status %d\n",
led_id, le32_to_cpu(reply.evt.status));
rc = -EINVAL;
}
out:
return rc;
}
int wmi_rbufcap_cfg(struct wil6210_priv *wil, bool enable, u16 threshold)
{
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
int rc;
struct wmi_rbufcap_cfg_cmd cmd = {
.enable = enable,
.rx_desc_threshold = cpu_to_le16(threshold),
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_rbufcap_cfg_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
rc = wmi_call(wil, WMI_RBUFCAP_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
WMI_RBUFCAP_CFG_EVENTID, &reply, sizeof(reply),
WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "RBUFCAP_CFG failed. status %d\n",
reply.evt.status);
rc = -EINVAL;
}
return rc;
}
int wmi_pcp_start(struct wil6210_vif *vif, int bi, u8 wmi_nettype,
u8 chan, u8 wmi_edmg_chan, u8 hidden_ssid, u8 is_go)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
struct wmi_pcp_start_cmd cmd = {
.bcon_interval = cpu_to_le16(bi),
.network_type = wmi_nettype,
.disable_sec_offload = 1,
.channel = chan - 1,
.edmg_channel = wmi_edmg_chan,
.pcp_max_assoc_sta = wil->max_assoc_sta,
.hidden_ssid = hidden_ssid,
.is_go = is_go,
.ap_sme_offload_mode = disable_ap_sme ?
WMI_AP_SME_OFFLOAD_PARTIAL :
WMI_AP_SME_OFFLOAD_FULL,
.abft_len = wil->abft_len,
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_pcp_started_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
if (!vif->privacy)
cmd.disable_sec = 1;
if ((cmd.pcp_max_assoc_sta > WIL6210_MAX_CID) ||
(cmd.pcp_max_assoc_sta <= 0)) {
wil_err(wil, "unexpected max_assoc_sta %d\n",
cmd.pcp_max_assoc_sta);
return -EOPNOTSUPP;
}
if (disable_ap_sme &&
!test_bit(WMI_FW_CAPABILITY_AP_SME_OFFLOAD_PARTIAL,
wil->fw_capabilities)) {
wil_err(wil, "disable_ap_sme not supported by FW\n");
return -EOPNOTSUPP;
}
/*
 * Processing time may be long; for a secure AP it takes the FW
 * about 3500 ms to start the AP
 */
rc = wmi_call(wil, WMI_PCP_START_CMDID, vif->mid, &cmd, sizeof(cmd),
WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 5000);
if (rc)
return rc;
if (reply.evt.status != WMI_FW_STATUS_SUCCESS)
rc = -EINVAL;
if (wmi_nettype != WMI_NETTYPE_P2P)
/* Don't fail due to error in the led configuration */
wmi_led_cfg(wil, true);
return rc;
}
int wmi_pcp_stop(struct wil6210_vif *vif)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
rc = wmi_led_cfg(wil, false);
if (rc)
return rc;
return wmi_call(wil, WMI_PCP_STOP_CMDID, vif->mid, NULL, 0,
WMI_PCP_STOPPED_EVENTID, NULL, 0,
WIL_WMI_PCP_STOP_TO_MS);
}
int wmi_set_ssid(struct wil6210_vif *vif, u8 ssid_len, const void *ssid)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wmi_set_ssid_cmd cmd = {
.ssid_len = cpu_to_le32(ssid_len),
};
if (ssid_len > sizeof(cmd.ssid))
return -EINVAL;
memcpy(cmd.ssid, ssid, ssid_len);
return wmi_send(wil, WMI_SET_SSID_CMDID, vif->mid, &cmd, sizeof(cmd));
}
int wmi_get_ssid(struct wil6210_vif *vif, u8 *ssid_len, void *ssid)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_set_ssid_cmd cmd;
} __packed reply;
int len; /* reply.cmd.ssid_len in CPU order */
memset(&reply, 0, sizeof(reply));
rc = wmi_call(wil, WMI_GET_SSID_CMDID, vif->mid, NULL, 0,
WMI_GET_SSID_EVENTID, &reply, sizeof(reply),
WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
len = le32_to_cpu(reply.cmd.ssid_len);
if (len > sizeof(reply.cmd.ssid))
return -EINVAL;
*ssid_len = len;
memcpy(ssid, reply.cmd.ssid, len);
return 0;
}
int wmi_set_channel(struct wil6210_priv *wil, int channel)
{
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
struct wmi_set_pcp_channel_cmd cmd = {
.channel = channel - 1,
};
return wmi_send(wil, WMI_SET_PCP_CHANNEL_CMDID, vif->mid,
&cmd, sizeof(cmd));
}
int wmi_get_channel(struct wil6210_priv *wil, int *channel)
{
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
int rc;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_set_pcp_channel_cmd cmd;
} __packed reply;
memset(&reply, 0, sizeof(reply));
rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, vif->mid, NULL, 0,
WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply),
WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
if (reply.cmd.channel > 3)
return -EINVAL;
*channel = reply.cmd.channel + 1;
return 0;
}
int wmi_p2p_cfg(struct wil6210_vif *vif, int channel, int bi)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
struct wmi_p2p_cfg_cmd cmd = {
.discovery_mode = WMI_DISCOVERY_MODE_PEER2PEER,
.bcon_interval = cpu_to_le16(bi),
.channel = channel - 1,
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_p2p_cfg_done_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
wil_dbg_wmi(wil, "sending WMI_P2P_CFG_CMDID\n");
rc = wmi_call(wil, WMI_P2P_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
WMI_P2P_CFG_DONE_EVENTID, &reply, sizeof(reply), 300);
if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "P2P_CFG failed. status %d\n", reply.evt.status);
rc = -EINVAL;
}
return rc;
}
int wmi_start_listen(struct wil6210_vif *vif)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_listen_started_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
wil_dbg_wmi(wil, "sending WMI_START_LISTEN_CMDID\n");
rc = wmi_call(wil, WMI_START_LISTEN_CMDID, vif->mid, NULL, 0,
WMI_LISTEN_STARTED_EVENTID, &reply, sizeof(reply), 300);
if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "device failed to start listen. status %d\n",
reply.evt.status);
rc = -EINVAL;
}
return rc;
}
int wmi_start_search(struct wil6210_vif *vif)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_search_started_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
wil_dbg_wmi(wil, "sending WMI_START_SEARCH_CMDID\n");
rc = wmi_call(wil, WMI_START_SEARCH_CMDID, vif->mid, NULL, 0,
WMI_SEARCH_STARTED_EVENTID, &reply, sizeof(reply), 300);
if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "device failed to start search. status %d\n",
reply.evt.status);
rc = -EINVAL;
}
return rc;
}
int wmi_stop_discovery(struct wil6210_vif *vif)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
wil_dbg_wmi(wil, "sending WMI_DISCOVERY_STOP_CMDID\n");
rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, vif->mid, NULL, 0,
WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0,
WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
wil_err(wil, "Failed to stop discovery\n");
return rc;
}
int wmi_del_cipher_key(struct wil6210_vif *vif, u8 key_index,
const void *mac_addr, int key_usage)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wmi_delete_cipher_key_cmd cmd = {
.key_index = key_index,
};
if (mac_addr)
memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
return wmi_send(wil, WMI_DELETE_CIPHER_KEY_CMDID, vif->mid,
&cmd, sizeof(cmd));
}
int wmi_add_cipher_key(struct wil6210_vif *vif, u8 key_index,
const void *mac_addr, int key_len, const void *key,
int key_usage)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wmi_add_cipher_key_cmd cmd = {
.key_index = key_index,
.key_usage = key_usage,
.key_len = key_len,
};
if (key_len > sizeof(cmd.key))
return -EINVAL;
/* key_len == 0 is allowed only for WMI_KEY_USE_APPLY_PTK */
if ((key_len == 0 || !key) &&
key_usage != WMI_KEY_USE_APPLY_PTK)
return -EINVAL;
if (key)
memcpy(cmd.key, key, key_len);
if (mac_addr)
memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
return wmi_send(wil, WMI_ADD_CIPHER_KEY_CMDID, vif->mid,
&cmd, sizeof(cmd));
}
int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie)
{
struct wil6210_priv *wil = vif_to_wil(vif);
static const char *const names[] = {
[WMI_FRAME_BEACON] = "BEACON",
[WMI_FRAME_PROBE_REQ] = "PROBE_REQ",
[WMI_FRAME_PROBE_RESP] = "WMI_FRAME_PROBE_RESP",
[WMI_FRAME_ASSOC_REQ] = "WMI_FRAME_ASSOC_REQ",
[WMI_FRAME_ASSOC_RESP] = "WMI_FRAME_ASSOC_RESP",
};
int rc;
u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
struct wmi_set_appie_cmd *cmd;
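/* len is u16, so sizeof(*cmd) + ie_len may wrap; a wrapped sum is
 * necessarily smaller than ie_len
 */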
if (len < ie_len) {
rc = -EINVAL;
goto out;
}
cmd = kzalloc(len, GFP_KERNEL);
if (!cmd) {
rc = -ENOMEM;
goto out;
}
if (!ie)
ie_len = 0;
cmd->mgmt_frm_type = type;
/* BUG: FW API defines ieLen as u8. Will fix in FW */
cmd->ie_len = cpu_to_le16(ie_len);
if (ie_len)
memcpy(cmd->ie_info, ie, ie_len);
rc = wmi_send(wil, WMI_SET_APPIE_CMDID, vif->mid, cmd, len);
kfree(cmd);
out:
if (rc) {
const char *name = type < ARRAY_SIZE(names) ?
names[type] : "??";
wil_err(wil, "set_ie(%d %s) failed : %d\n", type, name, rc);
}
return rc;
}
int wmi_update_ft_ies(struct wil6210_vif *vif, u16 ie_len, const void *ie)
{
struct wil6210_priv *wil = vif_to_wil(vif);
u16 len;
struct wmi_update_ft_ies_cmd *cmd;
int rc;
if (!ie)
ie_len = 0;
len = sizeof(struct wmi_update_ft_ies_cmd) + ie_len;
if (len < ie_len) {
wil_err(wil, "wraparound. ie len %d\n", ie_len);
return -EINVAL;
}
cmd = kzalloc(len, GFP_KERNEL);
if (!cmd) {
rc = -ENOMEM;
goto out;
}
cmd->ie_len = cpu_to_le16(ie_len);
if (ie_len)
memcpy(cmd->ie_info, ie, ie_len);
rc = wmi_send(wil, WMI_UPDATE_FT_IES_CMDID, vif->mid, cmd, len);
kfree(cmd);
out:
if (rc)
wil_err(wil, "update ft ies failed : %d\n", rc);
return rc;
}
/**
* wmi_rxon - turn radio on/off
* @wil: driver data
* @on: turn on if true, off otherwise
*
* Only switch radio. Channel should be set separately.
* No timeout for rxon - radio turned on forever unless some other call
* turns it off
*/
int wmi_rxon(struct wil6210_priv *wil, bool on)
{
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
int rc;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_listen_started_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
wil_info(wil, "(%s)\n", on ? "on" : "off");
if (on) {
rc = wmi_call(wil, WMI_START_LISTEN_CMDID, vif->mid, NULL, 0,
WMI_LISTEN_STARTED_EVENTID,
&reply, sizeof(reply),
WIL_WMI_CALL_GENERAL_TO_MS);
if ((rc == 0) && (reply.evt.status != WMI_FW_STATUS_SUCCESS))
rc = -EINVAL;
} else {
rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, vif->mid, NULL, 0,
WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0,
WIL_WMI_CALL_GENERAL_TO_MS);
}
return rc;
}
int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring)
{
struct net_device *ndev = wil->main_ndev;
struct wireless_dev *wdev = ndev->ieee80211_ptr;
struct wil6210_vif *vif = ndev_to_vif(ndev);
struct wmi_cfg_rx_chain_cmd cmd = {
.action = WMI_RX_CHAIN_ADD,
.rx_sw_ring = {
.max_mpdu_size = cpu_to_le16(
wil_mtu2macbuf(wil->rx_buf_len)),
.ring_mem_base = cpu_to_le64(vring->pa),
.ring_size = cpu_to_le16(vring->size),
},
.mid = 0, /* TODO - what is it? */
.decap_trans_type = WMI_DECAP_TYPE_802_3,
.reorder_type = WMI_RX_SW_REORDER,
.host_thrsh = cpu_to_le16(rx_ring_overflow_thrsh),
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_cfg_rx_chain_done_event evt;
} __packed evt;
int rc;
memset(&evt, 0, sizeof(evt));
if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
struct ieee80211_channel *ch = wil->monitor_chandef.chan;
cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
if (ch)
cmd.sniffer_cfg.channel = ch->hw_value - 1;
cmd.sniffer_cfg.phy_info_mode =
cpu_to_le32(WMI_SNIFFER_PHY_INFO_DISABLED);
cmd.sniffer_cfg.phy_support =
cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
? WMI_SNIFFER_CP : WMI_SNIFFER_BOTH_PHYS);
} else {
/* Initialize offload (in non-sniffer mode).
* Linux IP stack always calculates the IP checksum;
* HW always calculates the TCP/UDP checksum
*/
cmd.l3_l4_ctrl |= (1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS);
}
if (rx_align_2)
cmd.l2_802_3_offload_ctrl |=
L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK;
/* typical time for secure PCP is 840ms */
rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, vif->mid, &cmd, sizeof(cmd),
WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000);
if (rc)
return rc;
if (le32_to_cpu(evt.evt.status) != WMI_CFG_RX_CHAIN_SUCCESS)
rc = -EINVAL;
vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr);
wil_dbg_misc(wil, "Rx init: status %d tail 0x%08x\n",
le32_to_cpu(evt.evt.status), vring->hwtail);
return rc;
}
int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
{
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
int rc;
struct wmi_temp_sense_cmd cmd = {
.measure_baseband_en = cpu_to_le32(!!t_bb),
.measure_rf_en = cpu_to_le32(!!t_rf),
.measure_mode = cpu_to_le32(TEMPERATURE_MEASURE_NOW),
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_temp_sense_done_event evt;
} __packed reply;
memset(&reply, 0, sizeof(reply));
rc = wmi_call(wil, WMI_TEMP_SENSE_CMDID, vif->mid, &cmd, sizeof(cmd),
WMI_TEMP_SENSE_DONE_EVENTID, &reply, sizeof(reply),
WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
if (t_bb)
*t_bb = le32_to_cpu(reply.evt.baseband_t1000);
if (t_rf)
*t_rf = le32_to_cpu(reply.evt.rf_t1000);
return 0;
}
int wmi_get_all_temperatures(struct wil6210_priv *wil,
struct wmi_temp_sense_all_done_event
*sense_all_evt)
{
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
int rc;
struct wmi_temp_sense_all_cmd cmd = {
.measure_baseband_en = true,
.measure_rf_en = true,
.measure_mode = TEMPERATURE_MEASURE_NOW,
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_temp_sense_all_done_event evt;
} __packed reply;
if (!sense_all_evt) {
wil_err(wil, "Invalid sense_all_evt value\n");
return -EINVAL;
}
memset(&reply, 0, sizeof(reply));
reply.evt.status = WMI_FW_STATUS_FAILURE;
rc = wmi_call(wil, WMI_TEMP_SENSE_ALL_CMDID, vif->mid, &cmd,
sizeof(cmd), WMI_TEMP_SENSE_ALL_DONE_EVENTID,
&reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
if (reply.evt.status == WMI_FW_STATUS_FAILURE) {
wil_err(wil, "Failed getting TEMP_SENSE_ALL\n");
return -EINVAL;
}
memcpy(sense_all_evt, &reply.evt, sizeof(reply.evt));
return 0;
}
int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, u16 reason,
bool del_sta)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
struct wmi_disconnect_sta_cmd disc_sta_cmd = {
.disconnect_reason = cpu_to_le16(reason),
};
struct wmi_del_sta_cmd del_sta_cmd = {
.disconnect_reason = cpu_to_le16(reason),
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_disconnect_event evt;
} __packed reply;
wil_dbg_wmi(wil, "disconnect_sta: (%pM, reason %d)\n", mac, reason);
memset(&reply, 0, sizeof(reply));
vif->locally_generated_disc = true;
if (del_sta) {
ether_addr_copy(del_sta_cmd.dst_mac, mac);
rc = wmi_call(wil, WMI_DEL_STA_CMDID, vif->mid, &del_sta_cmd,
sizeof(del_sta_cmd), WMI_DISCONNECT_EVENTID,
&reply, sizeof(reply), 1000);
} else {
ether_addr_copy(disc_sta_cmd.dst_mac, mac);
rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, vif->mid,
&disc_sta_cmd, sizeof(disc_sta_cmd),
WMI_DISCONNECT_EVENTID,
&reply, sizeof(reply), 1000);
}
/* failure to disconnect in reasonable time treated as FW error */
if (rc) {
wil_fw_error_recovery(wil);
return rc;
}
wil->sinfo_gen++;
return 0;
}
int wmi_addba(struct wil6210_priv *wil, u8 mid,
u8 ringid, u8 size, u16 timeout)
{
u8 amsdu = wil->use_enhanced_dma_hw && wil->use_rx_hw_reordering &&
test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) &&
wil->amsdu_en;
struct wmi_ring_ba_en_cmd cmd = {
.ring_id = ringid,
.agg_max_wsize = size,
.ba_timeout = cpu_to_le16(timeout),
.amsdu = amsdu,
};
wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d amsdu %d)\n",
ringid, size, timeout, amsdu);
return wmi_send(wil, WMI_RING_BA_EN_CMDID, mid, &cmd, sizeof(cmd));
}
int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason)
{
struct wmi_ring_ba_dis_cmd cmd = {
.ring_id = ringid,
.reason = cpu_to_le16(reason),
};
wil_dbg_wmi(wil, "delba_tx: (ring %d reason %d)\n", ringid, reason);
return wmi_send(wil, WMI_RING_BA_DIS_CMDID, mid, &cmd, sizeof(cmd));
}
int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid, u16 reason)
{
struct wmi_rcp_delba_cmd cmd = {
.reason = cpu_to_le16(reason),
};
if (cid >= WIL6210_RX_DESC_MAX_CID) {
cmd.cidxtid = CIDXTID_EXTENDED_CID_TID;
cmd.cid = cid;
cmd.tid = tid;
} else {
cmd.cidxtid = mk_cidxtid(cid, tid);
}
wil_dbg_wmi(wil, "delba_rx: (CID %d TID %d reason %d)\n", cid,
tid, reason);
return wmi_send(wil, WMI_RCP_DELBA_CMDID, mid, &cmd, sizeof(cmd));
}
int wmi_addba_rx_resp(struct wil6210_priv *wil,
u8 mid, u8 cid, u8 tid, u8 token,
u16 status, bool amsdu, u16 agg_wsize, u16 timeout)
{
int rc;
struct wmi_rcp_addba_resp_cmd cmd = {
.dialog_token = token,
.status_code = cpu_to_le16(status),
/* bit 0: A-MSDU supported
* bit 1: policy (controlled by FW)
* bits 2..5: TID
* bits 6..15: buffer size
*/
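/* e.g. amsdu=true, tid=3, agg_wsize=16:
 * 1 | (3 << 2) | (16 << 6) = 0x40d
 */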
.ba_param_set = cpu_to_le16((amsdu ? 1 : 0) | (tid << 2) |
(agg_wsize << 6)),
.ba_timeout = cpu_to_le16(timeout),
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_rcp_addba_resp_sent_event evt;
} __packed reply = {
.evt = {.status = cpu_to_le16(WMI_FW_STATUS_FAILURE)},
};
if (cid >= WIL6210_RX_DESC_MAX_CID) {
cmd.cidxtid = CIDXTID_EXTENDED_CID_TID;
cmd.cid = cid;
cmd.tid = tid;
} else {
cmd.cidxtid = mk_cidxtid(cid, tid);
}
wil_dbg_wmi(wil,
"ADDBA response for MID %d CID %d TID %d size %d timeout %d status %d AMSDU%s\n",
mid, cid, tid, agg_wsize,
timeout, status, amsdu ? "+" : "-");
rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_CMDID, mid, &cmd, sizeof(cmd),
WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply),
WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
if (reply.evt.status) {
wil_err(wil, "ADDBA response failed with status %d\n",
le16_to_cpu(reply.evt.status));
rc = -EINVAL;
}
return rc;
}
int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid,
u8 token, u16 status, bool amsdu, u16 agg_wsize,
u16 timeout)
{
int rc;
struct wmi_rcp_addba_resp_edma_cmd cmd = {
.cid = cid,
.tid = tid,
.dialog_token = token,
.status_code = cpu_to_le16(status),
/* bit 0: A-MSDU supported
* bit 1: policy (controlled by FW)
* bits 2..5: TID
* bits 6..15: buffer size
*/
.ba_param_set = cpu_to_le16((amsdu ? 1 : 0) | (tid << 2) |
(agg_wsize << 6)),
.ba_timeout = cpu_to_le16(timeout),
/* route all the connections to status ring 0 */
.status_ring_id = WIL_DEFAULT_RX_STATUS_RING_ID,
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_rcp_addba_resp_sent_event evt;
} __packed reply = {
.evt = {.status = cpu_to_le16(WMI_FW_STATUS_FAILURE)},
};
wil_dbg_wmi(wil,
"ADDBA response for CID %d TID %d size %d timeout %d status %d AMSDU%s, sring_id %d\n",
cid, tid, agg_wsize, timeout, status, amsdu ? "+" : "-",
WIL_DEFAULT_RX_STATUS_RING_ID);
rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_EDMA_CMDID, mid, &cmd,
sizeof(cmd), WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply,
sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
if (reply.evt.status) {
wil_err(wil, "ADDBA response failed with status %d\n",
le16_to_cpu(reply.evt.status));
rc = -EINVAL;
}
return rc;
}
int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil,
enum wmi_ps_profile_type ps_profile)
{
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
int rc;
struct wmi_ps_dev_profile_cfg_cmd cmd = {
.ps_profile = ps_profile,
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_ps_dev_profile_cfg_event evt;
} __packed reply = {
.evt = {.status = cpu_to_le32(WMI_PS_CFG_CMD_STATUS_ERROR)},
};
u32 status;
wil_dbg_wmi(wil, "Setting ps dev profile %d\n", ps_profile);
rc = wmi_call(wil, WMI_PS_DEV_PROFILE_CFG_CMDID, vif->mid,
&cmd, sizeof(cmd),
WMI_PS_DEV_PROFILE_CFG_EVENTID, &reply, sizeof(reply),
WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
status = le32_to_cpu(reply.evt.status);
if (status != WMI_PS_CFG_CMD_STATUS_SUCCESS) {
wil_err(wil, "ps dev profile cfg failed with status %d\n",
status);
rc = -EINVAL;
}
return rc;
}
int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short)
{
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
int rc;
struct wmi_set_mgmt_retry_limit_cmd cmd = {
.mgmt_retry_limit = retry_short,
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_set_mgmt_retry_limit_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
wil_dbg_wmi(wil, "Setting mgmt retry short %d\n", retry_short);
if (!test_bit(WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT, wil->fw_capabilities))
return -ENOTSUPP;
rc = wmi_call(wil, WMI_SET_MGMT_RETRY_LIMIT_CMDID, vif->mid,
&cmd, sizeof(cmd),
WMI_SET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply),
WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "set mgmt retry limit failed with status %d\n",
reply.evt.status);
rc = -EINVAL;
}
return rc;
}
int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short)
{
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
int rc;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_get_mgmt_retry_limit_event evt;
} __packed reply;
wil_dbg_wmi(wil, "getting mgmt retry short\n");
if (!test_bit(WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT, wil->fw_capabilities))
return -ENOTSUPP;
memset(&reply, 0, sizeof(reply));
rc = wmi_call(wil, WMI_GET_MGMT_RETRY_LIMIT_CMDID, vif->mid, NULL, 0,
WMI_GET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply),
WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
if (retry_short)
*retry_short = reply.evt.mgmt_retry_limit;
return 0;
}
int wmi_abort_scan(struct wil6210_vif *vif)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
wil_dbg_wmi(wil, "sending WMI_ABORT_SCAN_CMDID\n");
rc = wmi_send(wil, WMI_ABORT_SCAN_CMDID, vif->mid, NULL, 0);
if (rc)
wil_err(wil, "Failed to abort scan (%d)\n", rc);
return rc;
}
int wmi_new_sta(struct wil6210_vif *vif, const u8 *mac, u8 aid)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
struct wmi_new_sta_cmd cmd = {
.aid = aid,
};
wil_dbg_wmi(wil, "new sta %pM, aid %d\n", mac, aid);
ether_addr_copy(cmd.dst_mac, mac);
rc = wmi_send(wil, WMI_NEW_STA_CMDID, vif->mid, &cmd, sizeof(cmd));
if (rc)
wil_err(wil, "Failed to send new sta (%d)\n", rc);
return rc;
}
void wmi_event_flush(struct wil6210_priv *wil)
{
ulong flags;
struct pending_wmi_event *evt, *t;
wil_dbg_wmi(wil, "event_flush\n");
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) {
list_del(&evt->list);
kfree(evt);
}
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
}
static const char *suspend_status2name(u8 status)
{
switch (status) {
case WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE:
return "LINK_NOT_IDLE";
case WMI_TRAFFIC_SUSPEND_REJECTED_DISCONNECT:
return "DISCONNECT";
case WMI_TRAFFIC_SUSPEND_REJECTED_OTHER:
return "OTHER";
default:
return "Untracked status";
}
}
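/* Suspend is a two-step handshake: wmi_call() below waits for
 * WMI_TRAFFIC_SUSPEND_EVENTID (wmi_recv_cmd() also latches
 * suspend_resp_rcvd when it sees it), then we wait for
 * suspend_resp_comp, which is set outside this file once the device
 * indicates it has actually finished suspending.
 */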
int wmi_suspend(struct wil6210_priv *wil)
{
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
int rc;
struct wmi_traffic_suspend_cmd cmd = {
.wakeup_trigger = wil->wakeup_trigger,
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_traffic_suspend_event evt;
} __packed reply = {
.evt = {.status = WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE},
};
u32 suspend_to = WIL_WAIT_FOR_SUSPEND_RESUME_COMP;
wil->suspend_resp_rcvd = false;
wil->suspend_resp_comp = false;
rc = wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, vif->mid,
&cmd, sizeof(cmd),
WMI_TRAFFIC_SUSPEND_EVENTID, &reply, sizeof(reply),
suspend_to);
if (rc) {
wil_err(wil, "wmi_call for suspend req failed, rc=%d\n", rc);
if (rc == -ETIME)
/* wmi_call timed out */
wil->suspend_stats.rejected_by_device++;
else
wil->suspend_stats.rejected_by_host++;
goto out;
}
wil_dbg_wmi(wil, "waiting for suspend_response_completed\n");
rc = wait_event_interruptible_timeout(wil->wq,
wil->suspend_resp_comp,
msecs_to_jiffies(suspend_to));
if (rc == 0) {
wil_err(wil, "TO waiting for suspend_response_completed\n");
if (wil->suspend_resp_rcvd)
/* Device responded but we TO due to another reason */
wil->suspend_stats.rejected_by_host++;
else
wil->suspend_stats.rejected_by_device++;
rc = -EBUSY;
goto out;
}
wil_dbg_wmi(wil, "suspend_response_completed rcvd\n");
if (reply.evt.status != WMI_TRAFFIC_SUSPEND_APPROVED) {
wil_dbg_pm(wil, "device rejected the suspend, %s\n",
suspend_status2name(reply.evt.status));
wil->suspend_stats.rejected_by_device++;
}
rc = reply.evt.status;
out:
wil->suspend_resp_rcvd = false;
wil->suspend_resp_comp = false;
return rc;
}
static void resume_triggers2string(u32 triggers, char *string, int str_size)
{
string[0] = '\0';
if (!triggers) {
strlcat(string, " UNKNOWN", str_size);
return;
}
if (triggers & WMI_RESUME_TRIGGER_HOST)
strlcat(string, " HOST", str_size);
if (triggers & WMI_RESUME_TRIGGER_UCAST_RX)
strlcat(string, " UCAST_RX", str_size);
if (triggers & WMI_RESUME_TRIGGER_BCAST_RX)
strlcat(string, " BCAST_RX", str_size);
if (triggers & WMI_RESUME_TRIGGER_WMI_EVT)
strlcat(string, " WMI_EVT", str_size);
if (triggers & WMI_RESUME_TRIGGER_DISCONNECT)
strlcat(string, " DISCONNECT", str_size);
}
int wmi_resume(struct wil6210_priv *wil)
{
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
int rc;
char string[100];
struct {
struct wmi_cmd_hdr wmi;
struct wmi_traffic_resume_event evt;
} __packed reply = {
.evt = {.status = WMI_TRAFFIC_RESUME_FAILED,
.resume_triggers =
cpu_to_le32(WMI_RESUME_TRIGGER_UNKNOWN)},
};
rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, vif->mid, NULL, 0,
WMI_TRAFFIC_RESUME_EVENTID, &reply, sizeof(reply),
WIL_WAIT_FOR_SUSPEND_RESUME_COMP);
if (rc)
return rc;
resume_triggers2string(le32_to_cpu(reply.evt.resume_triggers), string,
sizeof(string));
wil_dbg_pm(wil, "device resume %s, resume triggers:%s (0x%x)\n",
reply.evt.status ? "failed" : "passed", string,
le32_to_cpu(reply.evt.resume_triggers));
return reply.evt.status;
}
int wmi_port_allocate(struct wil6210_priv *wil, u8 mid,
const u8 *mac, enum nl80211_iftype iftype)
{
int rc;
struct wmi_port_allocate_cmd cmd = {
.mid = mid,
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_port_allocated_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
wil_dbg_misc(wil, "port allocate, mid %d iftype %d, mac %pM\n",
mid, iftype, mac);
ether_addr_copy(cmd.mac, mac);
switch (iftype) {
case NL80211_IFTYPE_STATION:
cmd.port_role = WMI_PORT_STA;
break;
case NL80211_IFTYPE_AP:
cmd.port_role = WMI_PORT_AP;
break;
case NL80211_IFTYPE_P2P_CLIENT:
cmd.port_role = WMI_PORT_P2P_CLIENT;
break;
case NL80211_IFTYPE_P2P_GO:
cmd.port_role = WMI_PORT_P2P_GO;
break;
/* what about monitor??? */
default:
wil_err(wil, "unsupported iftype: %d\n", iftype);
return -EINVAL;
}
rc = wmi_call(wil, WMI_PORT_ALLOCATE_CMDID, mid,
&cmd, sizeof(cmd),
WMI_PORT_ALLOCATED_EVENTID, &reply,
sizeof(reply), 300);
if (rc) {
wil_err(wil, "failed to allocate port, status %d\n", rc);
return rc;
}
if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "WMI_PORT_ALLOCATE returned status %d\n",
reply.evt.status);
return -EINVAL;
}
return 0;
}
int wmi_port_delete(struct wil6210_priv *wil, u8 mid)
{
int rc;
struct wmi_port_delete_cmd cmd = {
.mid = mid,
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_port_deleted_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
wil_dbg_misc(wil, "port delete, mid %d\n", mid);
rc = wmi_call(wil, WMI_PORT_DELETE_CMDID, mid,
&cmd, sizeof(cmd),
WMI_PORT_DELETED_EVENTID, &reply,
sizeof(reply), 2000);
if (rc) {
wil_err(wil, "failed to delete port, status %d\n", rc);
return rc;
}
if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "WMI_PORT_DELETE returned status %d\n",
reply.evt.status);
return -EINVAL;
}
return 0;
}
static bool wmi_evt_call_handler(struct wil6210_vif *vif, int id,
void *d, int len)
{
uint i;
for (i = 0; i < ARRAY_SIZE(wmi_evt_handlers); i++) {
if (wmi_evt_handlers[i].eventid == id) {
wmi_evt_handlers[i].handler(vif, id, d, len);
return true;
}
}
return false;
}
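/* Handle one queued mailbox message in thread context: validate the
 * WMI header, resolve the VIF from mid (broadcast maps to VIF 0),
 * complete a pending wmi_call() when id/mid match, otherwise dispatch
 * through wmi_evt_handlers[].
 */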
static void wmi_event_handle(struct wil6210_priv *wil,
struct wil6210_mbox_hdr *hdr)
{
u16 len = le16_to_cpu(hdr->len);
struct wil6210_vif *vif;
if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) &&
(len >= sizeof(struct wmi_cmd_hdr))) {
struct wmi_cmd_hdr *wmi = (void *)(&hdr[1]);
void *evt_data = (void *)(&wmi[1]);
u16 id = le16_to_cpu(wmi->command_id);
u8 mid = wmi->mid;
wil_dbg_wmi(wil, "Handle %s (0x%04x) (reply_id 0x%04x,%d)\n",
eventid2name(id), id, wil->reply_id,
wil->reply_mid);
if (mid == MID_BROADCAST)
mid = 0;
if (mid >= GET_MAX_VIFS(wil)) {
wil_dbg_wmi(wil, "invalid mid %d, event skipped\n",
mid);
return;
}
vif = wil->vifs[mid];
if (!vif) {
wil_dbg_wmi(wil, "event for empty VIF(%d), skipped\n",
mid);
return;
}
/* check if someone waits for this event */
if (wil->reply_id && wil->reply_id == id &&
wil->reply_mid == mid) {
if (wil->reply_buf) {
/* event received while wmi_call is waiting
 * with a buffer. Such an event should be handled
 * in wmi_recv_cmd. Handling it here means a
 * previous wmi_call timed out.
 * Drop the event and do not handle it.
 */
wil_err(wil,
"Old event (%d, %s) while wmi_call is waiting. Drop it and Continue waiting\n",
id, eventid2name(id));
return;
}
wmi_evt_call_handler(vif, id, evt_data,
len - sizeof(*wmi));
wil_dbg_wmi(wil, "event_handle: Complete WMI 0x%04x\n",
id);
complete(&wil->wmi_call);
return;
}
/* unsolicited event */
/* search for handler */
if (!wmi_evt_call_handler(vif, id, evt_data,
len - sizeof(*wmi))) {
wil_info(wil, "Unhandled event 0x%04x\n", id);
}
} else {
wil_err(wil, "Unknown event type\n");
print_hex_dump(KERN_ERR, "evt?? ", DUMP_PREFIX_OFFSET, 16, 1,
hdr, sizeof(*hdr) + len, true);
}
}
/*
* Retrieve next WMI event from the pending list
*/
static struct list_head *next_wmi_ev(struct wil6210_priv *wil)
{
ulong flags;
struct list_head *ret = NULL;
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
if (!list_empty(&wil->pending_wmi_ev)) {
ret = wil->pending_wmi_ev.next;
list_del(ret);
}
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
return ret;
}
/*
* Handler for the WMI events
*/
void wmi_event_worker(struct work_struct *work)
{
struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
wmi_event_worker);
struct pending_wmi_event *evt;
struct list_head *lh;
wil_dbg_wmi(wil, "event_worker: Start\n");
while ((lh = next_wmi_ev(wil)) != NULL) {
evt = list_entry(lh, struct pending_wmi_event, list);
wmi_event_handle(wil, &evt->event.hdr);
kfree(evt);
}
wil_dbg_wmi(wil, "event_worker: Finished\n");
}
bool wil_is_wmi_idle(struct wil6210_priv *wil)
{
ulong flags;
struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx;
bool rc = false;
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
/* Check if there are pending WMI events in the events queue */
if (!list_empty(&wil->pending_wmi_ev)) {
wil_dbg_pm(wil, "Pending WMI events in queue\n");
goto out;
}
/* Check if there is a pending WMI call */
if (wil->reply_id) {
wil_dbg_pm(wil, "Pending WMI call\n");
goto out;
}
/* Check if there are pending RX events in mbox */
r->head = wil_r(wil, RGF_MBOX +
offsetof(struct wil6210_mbox_ctl, rx.head));
if (r->tail != r->head)
wil_dbg_pm(wil, "Pending WMI mbox events\n");
else
rc = true;
out:
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
return rc;
}
static void
wmi_sched_scan_set_ssids(struct wil6210_priv *wil,
struct wmi_start_sched_scan_cmd *cmd,
struct cfg80211_ssid *ssids, int n_ssids,
struct cfg80211_match_set *match_sets,
int n_match_sets)
{
int i;
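/* FW matches against ssid_for_match[]; an entry whose SSID also
 * appears in the active-probe list (ssids[]) additionally gets
 * add_ssid_to_probe set so it is probed actively.
 */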
if (n_match_sets > WMI_MAX_PNO_SSID_NUM) {
wil_dbg_wmi(wil, "too many match sets (%d), use first %d\n",
n_match_sets, WMI_MAX_PNO_SSID_NUM);
n_match_sets = WMI_MAX_PNO_SSID_NUM;
}
cmd->num_of_ssids = n_match_sets;
for (i = 0; i < n_match_sets; i++) {
struct wmi_sched_scan_ssid_match *wmi_match =
&cmd->ssid_for_match[i];
struct cfg80211_match_set *cfg_match = &match_sets[i];
int j;
wmi_match->ssid_len = cfg_match->ssid.ssid_len;
memcpy(wmi_match->ssid, cfg_match->ssid.ssid,
min_t(u8, wmi_match->ssid_len, WMI_MAX_SSID_LEN));
wmi_match->rssi_threshold = S8_MIN;
if (cfg_match->rssi_thold >= S8_MIN &&
cfg_match->rssi_thold <= S8_MAX)
wmi_match->rssi_threshold = cfg_match->rssi_thold;
for (j = 0; j < n_ssids; j++)
if (wmi_match->ssid_len == ssids[j].ssid_len &&
memcmp(wmi_match->ssid, ssids[j].ssid,
wmi_match->ssid_len) == 0)
wmi_match->add_ssid_to_probe = true;
}
}
static void
wmi_sched_scan_set_channels(struct wil6210_priv *wil,
struct wmi_start_sched_scan_cmd *cmd,
u32 n_channels,
struct ieee80211_channel **channels)
{
int i;
if (n_channels > WMI_MAX_CHANNEL_NUM) {
wil_dbg_wmi(wil, "too many channels (%d), use first %d\n",
n_channels, WMI_MAX_CHANNEL_NUM);
n_channels = WMI_MAX_CHANNEL_NUM;
}
cmd->num_of_channels = n_channels;
for (i = 0; i < n_channels; i++) {
struct ieee80211_channel *cfg_chan = channels[i];
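/* firmware channel numbering is 0-based; hw_value is 1-based */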
cmd->channel_list[i] = cfg_chan->hw_value - 1;
}
}
static void
wmi_sched_scan_set_plans(struct wil6210_priv *wil,
struct wmi_start_sched_scan_cmd *cmd,
struct cfg80211_sched_scan_plan *scan_plans,
int n_scan_plans)
{
int i;
if (n_scan_plans > WMI_MAX_PLANS_NUM) {
wil_dbg_wmi(wil, "too many plans (%d), use first %d\n",
n_scan_plans, WMI_MAX_PLANS_NUM);
n_scan_plans = WMI_MAX_PLANS_NUM;
}
for (i = 0; i < n_scan_plans; i++) {
struct cfg80211_sched_scan_plan *cfg_plan = &scan_plans[i];
cmd->scan_plans[i].interval_sec =
cpu_to_le16(cfg_plan->interval);
cmd->scan_plans[i].num_of_iterations =
cpu_to_le16(cfg_plan->iterations);
}
}
int wmi_start_sched_scan(struct wil6210_priv *wil,
struct cfg80211_sched_scan_request *request)
{
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
int rc;
struct wmi_start_sched_scan_cmd cmd = {
.min_rssi_threshold = S8_MIN,
.initial_delay_sec = cpu_to_le16(request->delay),
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_start_sched_scan_event evt;
} __packed reply = {
.evt = {.result = WMI_PNO_REJECT},
};
if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities))
return -ENOTSUPP;
if (request->min_rssi_thold >= S8_MIN &&
request->min_rssi_thold <= S8_MAX)
cmd.min_rssi_threshold = request->min_rssi_thold;
wmi_sched_scan_set_ssids(wil, &cmd, request->ssids, request->n_ssids,
request->match_sets, request->n_match_sets);
wmi_sched_scan_set_channels(wil, &cmd,
request->n_channels, request->channels);
wmi_sched_scan_set_plans(wil, &cmd,
request->scan_plans, request->n_scan_plans);
rc = wmi_call(wil, WMI_START_SCHED_SCAN_CMDID, vif->mid,
&cmd, sizeof(cmd),
WMI_START_SCHED_SCAN_EVENTID, &reply, sizeof(reply),
WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
if (reply.evt.result != WMI_PNO_SUCCESS) {
wil_err(wil, "start sched scan failed, result %d\n",
reply.evt.result);
return -EINVAL;
}
return 0;
}
int wmi_stop_sched_scan(struct wil6210_priv *wil)
{
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
int rc;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_stop_sched_scan_event evt;
} __packed reply = {
.evt = {.result = WMI_PNO_REJECT},
};
if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities))
return -ENOTSUPP;
rc = wmi_call(wil, WMI_STOP_SCHED_SCAN_CMDID, vif->mid, NULL, 0,
WMI_STOP_SCHED_SCAN_EVENTID, &reply, sizeof(reply),
WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
if (reply.evt.result != WMI_PNO_SUCCESS) {
wil_err(wil, "stop sched scan failed, result %d\n",
reply.evt.result);
return -EINVAL;
}
return 0;
}
int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len)
{
size_t total;
struct wil6210_priv *wil = vif_to_wil(vif);
struct ieee80211_mgmt *mgmt_frame = (void *)buf;
struct wmi_sw_tx_req_cmd *cmd;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_sw_tx_complete_event evt;
} __packed evt = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
int rc;
wil_dbg_misc(wil, "mgmt_tx mid %d\n", vif->mid);
wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf,
len, true);
if (len < sizeof(struct ieee80211_hdr_3addr))
return -EINVAL;
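/* guard against size_t overflow when prepending the command header */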
total = sizeof(*cmd) + len;
if (total < len) {
wil_err(wil, "mgmt_tx invalid len %zu\n", len);
return -EINVAL;
}
cmd = kmalloc(total, GFP_KERNEL);
if (!cmd)
return -ENOMEM;
memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN);
cmd->len = cpu_to_le16(len);
memcpy(cmd->payload, buf, len);
rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, vif->mid, cmd, total,
WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_dbg_wmi(wil, "mgmt_tx failed with status %d\n",
evt.evt.status);
rc = -EAGAIN;
}
kfree(cmd);
return rc;
}
int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len,
u8 channel, u16 duration_ms)
{
size_t total;
struct wil6210_priv *wil = vif_to_wil(vif);
struct ieee80211_mgmt *mgmt_frame = (void *)buf;
struct wmi_sw_tx_req_ext_cmd *cmd;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_sw_tx_complete_event evt;
} __packed evt = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
int rc;
wil_dbg_wmi(wil, "mgmt_tx_ext mid %d channel %d duration %d\n",
vif->mid, channel, duration_ms);
wil_hex_dump_wmi("mgmt_tx_ext frame ", DUMP_PREFIX_OFFSET, 16, 1, buf,
len, true);
if (len < sizeof(struct ieee80211_hdr_3addr)) {
wil_err(wil, "short frame. len %zu\n", len);
return -EINVAL;
}
total = sizeof(*cmd) + len;
if (total < len) {
wil_err(wil, "mgmt_tx_ext invalid len %zu\n", len);
return -EINVAL;
}
cmd = kzalloc(total, GFP_KERNEL);
if (!cmd)
return -ENOMEM;
memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN);
cmd->len = cpu_to_le16(len);
memcpy(cmd->payload, buf, len);
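/* firmware expects a 0-based channel index; cfg80211 uses 1-based */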
cmd->channel = channel - 1;
cmd->duration_ms = cpu_to_le16(duration_ms);
rc = wmi_call(wil, WMI_SW_TX_REQ_EXT_CMDID, vif->mid, cmd, total,
WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_dbg_wmi(wil, "mgmt_tx_ext failed with status %d\n",
evt.evt.status);
rc = -EAGAIN;
}
kfree(cmd);
return rc;
}
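/* Enhanced-DMA ring setup follows a common WMI handshake: the host
 * passes the ring's DMA base address and size in the command; on
 * success the firmware returns ring_tail_ptr, the device address of
 * the ring's hardware tail, which the driver caches in hwtail. The
 * wil_wmi_*_ring_add helpers below all follow this pattern.
 */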
int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id)
{
int rc;
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
struct wil_status_ring *sring = &wil->srings[ring_id];
struct wmi_tx_status_ring_add_cmd cmd = {
.ring_cfg = {
.ring_size = cpu_to_le16(sring->size),
},
.irq_index = WIL_TX_STATUS_IRQ_IDX
};
struct {
struct wmi_cmd_hdr hdr;
struct wmi_tx_status_ring_cfg_done_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
cmd.ring_cfg.ring_id = ring_id;
cmd.ring_cfg.ring_mem_base = cpu_to_le64(sring->pa);
rc = wmi_call(wil, WMI_TX_STATUS_RING_ADD_CMDID, vif->mid, &cmd,
sizeof(cmd), WMI_TX_STATUS_RING_CFG_DONE_EVENTID,
&reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
if (rc) {
wil_err(wil, "TX_STATUS_RING_ADD_CMD failed, rc %d\n", rc);
return rc;
}
if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "TX_STATUS_RING_ADD_CMD failed, status %d\n",
reply.evt.status);
return -EINVAL;
}
sring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
return 0;
}
int wil_wmi_cfg_def_rx_offload(struct wil6210_priv *wil, u16 max_rx_pl_per_desc)
{
struct net_device *ndev = wil->main_ndev;
struct wil6210_vif *vif = ndev_to_vif(ndev);
int rc;
struct wmi_cfg_def_rx_offload_cmd cmd = {
.max_msdu_size = cpu_to_le16(wil_mtu2macbuf(WIL_MAX_ETH_MTU)),
.max_rx_pl_per_desc = cpu_to_le16(max_rx_pl_per_desc),
.decap_trans_type = WMI_DECAP_TYPE_802_3,
.l2_802_3_offload_ctrl = 0,
.l3_l4_ctrl = 1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS,
};
struct {
struct wmi_cmd_hdr hdr;
struct wmi_cfg_def_rx_offload_done_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
rc = wmi_call(wil, WMI_CFG_DEF_RX_OFFLOAD_CMDID, vif->mid, &cmd,
sizeof(cmd), WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID, &reply,
sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
if (rc) {
wil_err(wil, "WMI_CFG_DEF_RX_OFFLOAD_CMD failed, rc %d\n", rc);
return rc;
}
if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "WMI_CFG_DEF_RX_OFFLOAD_CMD failed, status %d\n",
reply.evt.status);
return -EINVAL;
}
return 0;
}
int wil_wmi_rx_sring_add(struct wil6210_priv *wil, u16 ring_id)
{
struct net_device *ndev = wil->main_ndev;
struct wil6210_vif *vif = ndev_to_vif(ndev);
struct wil_status_ring *sring = &wil->srings[ring_id];
int rc;
struct wmi_rx_status_ring_add_cmd cmd = {
.ring_cfg = {
.ring_size = cpu_to_le16(sring->size),
.ring_id = ring_id,
},
.rx_msg_type = wil->use_compressed_rx_status ?
WMI_RX_MSG_TYPE_COMPRESSED :
WMI_RX_MSG_TYPE_EXTENDED,
.irq_index = WIL_RX_STATUS_IRQ_IDX,
};
struct {
struct wmi_cmd_hdr hdr;
struct wmi_rx_status_ring_cfg_done_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
cmd.ring_cfg.ring_mem_base = cpu_to_le64(sring->pa);
rc = wmi_call(wil, WMI_RX_STATUS_RING_ADD_CMDID, vif->mid, &cmd,
sizeof(cmd), WMI_RX_STATUS_RING_CFG_DONE_EVENTID, &reply,
sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
if (rc) {
wil_err(wil, "RX_STATUS_RING_ADD_CMD failed, rc %d\n", rc);
return rc;
}
if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "RX_STATUS_RING_ADD_CMD failed, status %d\n",
reply.evt.status);
return -EINVAL;
}
sring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
return 0;
}
int wil_wmi_rx_desc_ring_add(struct wil6210_priv *wil, int status_ring_id)
{
struct net_device *ndev = wil->main_ndev;
struct wil6210_vif *vif = ndev_to_vif(ndev);
struct wil_ring *ring = &wil->ring_rx;
int rc;
struct wmi_rx_desc_ring_add_cmd cmd = {
.ring_cfg = {
.ring_size = cpu_to_le16(ring->size),
.ring_id = WIL_RX_DESC_RING_ID,
},
.status_ring_id = status_ring_id,
.irq_index = WIL_RX_STATUS_IRQ_IDX,
};
struct {
struct wmi_cmd_hdr hdr;
struct wmi_rx_desc_ring_cfg_done_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
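/* sw_tail_host_addr points at host memory holding the driver's Rx
 * software tail; presumably the firmware fetches the tail from there
 * by DMA rather than from a doorbell register
 */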
cmd.sw_tail_host_addr = cpu_to_le64(ring->edma_rx_swtail.pa);
rc = wmi_call(wil, WMI_RX_DESC_RING_ADD_CMDID, vif->mid, &cmd,
sizeof(cmd), WMI_RX_DESC_RING_CFG_DONE_EVENTID, &reply,
sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
if (rc) {
wil_err(wil, "WMI_RX_DESC_RING_ADD_CMD failed, rc %d\n", rc);
return rc;
}
if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "WMI_RX_DESC_RING_ADD_CMD failed, status %d\n",
reply.evt.status);
return -EINVAL;
}
ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
return 0;
}
int wil_wmi_tx_desc_ring_add(struct wil6210_vif *vif, int ring_id, int cid,
int tid)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int sring_id = wil->tx_sring_idx; /* there is only one TX sring */
int rc;
struct wil_ring *ring = &wil->ring_tx[ring_id];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
struct wmi_tx_desc_ring_add_cmd cmd = {
.ring_cfg = {
.ring_size = cpu_to_le16(ring->size),
.ring_id = ring_id,
},
.status_ring_id = sring_id,
.cid = cid,
.tid = tid,
.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
.max_msdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)),
.schd_params = {
.priority = cpu_to_le16(0),
.timeslot_us = cpu_to_le16(0xfff),
}
};
struct {
struct wmi_cmd_hdr hdr;
struct wmi_tx_desc_ring_cfg_done_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
rc = wmi_call(wil, WMI_TX_DESC_RING_ADD_CMDID, vif->mid, &cmd,
sizeof(cmd), WMI_TX_DESC_RING_CFG_DONE_EVENTID, &reply,
sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
if (rc) {
wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed, rc %d\n", rc);
return rc;
}
if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed, status %d\n",
reply.evt.status);
return -EINVAL;
}
spin_lock_bh(&txdata->lock);
ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
txdata->mid = vif->mid;
txdata->enabled = 1;
spin_unlock_bh(&txdata->lock);
return 0;
}
int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wil_ring *ring = &wil->ring_tx[ring_id];
int rc;
struct wmi_bcast_desc_ring_add_cmd cmd = {
.ring_cfg = {
.ring_size = cpu_to_le16(ring->size),
.ring_id = ring_id,
},
.max_msdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)),
.status_ring_id = wil->tx_sring_idx,
.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
};
struct {
struct wmi_cmd_hdr hdr;
struct wmi_rx_desc_ring_cfg_done_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
rc = wmi_call(wil, WMI_BCAST_DESC_RING_ADD_CMDID, vif->mid, &cmd,
sizeof(cmd), WMI_TX_DESC_RING_CFG_DONE_EVENTID, &reply,
sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
if (rc) {
wil_err(wil, "WMI_BCAST_DESC_RING_ADD_CMD failed, rc %d\n", rc);
return rc;
}
if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "Broadcast Tx config failed, status %d\n",
reply.evt.status);
return -EINVAL;
}
spin_lock_bh(&txdata->lock);
ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
txdata->mid = vif->mid;
txdata->enabled = 1;
spin_unlock_bh(&txdata->lock);
return 0;
}
int wmi_link_stats_cfg(struct wil6210_vif *vif, u32 type, u8 cid, u32 interval)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wmi_link_stats_cmd cmd = {
.record_type_mask = cpu_to_le32(type),
.cid = cid,
.action = WMI_LINK_STATS_SNAPSHOT,
.interval_msec = cpu_to_le32(interval),
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_link_stats_config_done_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
int rc;
rc = wmi_call(wil, WMI_LINK_STATS_CMDID, vif->mid, &cmd, sizeof(cmd),
WMI_LINK_STATS_CONFIG_DONE_EVENTID, &reply,
sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
if (rc) {
wil_err(wil, "WMI_LINK_STATS_CMDID failed, rc %d\n", rc);
return rc;
}
if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "Link statistics config failed, status %d\n",
reply.evt.status);
return -EINVAL;
}
return 0;
}
int wmi_set_cqm_rssi_config(struct wil6210_priv *wil,
s32 rssi_thold, u32 rssi_hyst)
{
struct net_device *ndev = wil->main_ndev;
struct wil6210_vif *vif = ndev_to_vif(ndev);
int rc;
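/* the WMI command ends with a variable-length list of s8 thresholds;
 * a single threshold is appended here, matching
 * rssi_thresholds_list_size = 1
 */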
struct {
struct wmi_set_link_monitor_cmd cmd;
s8 rssi_thold;
} __packed cmd = {
.cmd = {
.rssi_hyst = rssi_hyst,
.rssi_thresholds_list_size = 1,
},
.rssi_thold = rssi_thold,
};
struct {
struct wmi_cmd_hdr hdr;
struct wmi_set_link_monitor_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
if (rssi_thold > S8_MAX || rssi_thold < S8_MIN || rssi_hyst > U8_MAX)
return -EINVAL;
rc = wmi_call(wil, WMI_SET_LINK_MONITOR_CMDID, vif->mid, &cmd,
sizeof(cmd), WMI_SET_LINK_MONITOR_EVENTID,
&reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
if (rc) {
wil_err(wil, "WMI_SET_LINK_MONITOR_CMDID failed, rc %d\n", rc);
return rc;
}
if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "WMI_SET_LINK_MONITOR_CMDID failed, status %d\n",
reply.evt.status);
return -EINVAL;
}
return 0;
}
|
linux-master
|
drivers/net/wireless/ath/wil6210/wmi.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/pci.h>
#include <linux/rtnetlink.h>
#include <linux/power_supply.h>
#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
#include "pmc.h"
/* Nasty hack. Better have per device instances */
static u32 mem_addr;
static u32 dbg_txdesc_index;
static u32 dbg_ring_index; /* 24+ for Rx, 0..23 for Tx */
static u32 dbg_status_msg_index;
/* 0..wil->num_rx_status_rings-1 for Rx, wil->tx_sring_idx for Tx */
static u32 dbg_sring_index;
enum dbg_off_type {
doff_u32 = 0,
doff_x32 = 1,
doff_ulong = 2,
doff_io32 = 3,
doff_u8 = 4
};
/* offset to "wil" */
struct dbg_off {
const char *name;
umode_t mode;
ulong off;
enum dbg_off_type type;
};
static void wil_print_desc_edma(struct seq_file *s, struct wil6210_priv *wil,
struct wil_ring *ring,
char _s, char _h, int idx)
{
u8 num_of_descs;
bool has_skb = false;
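/* legend: print _h when the descriptor has an skb attached, _s
 * otherwise; a Tx fragment descriptor (num_of_descs == 0) that still
 * has an skb prints 'h'
 */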
if (ring->is_rx) {
struct wil_rx_enhanced_desc *rx_d =
(struct wil_rx_enhanced_desc *)
&ring->va[idx].rx.enhanced;
u16 buff_id = le16_to_cpu(rx_d->mac.buff_id);
if (wil->rx_buff_mgmt.buff_arr &&
wil_val_in_range(buff_id, 0, wil->rx_buff_mgmt.size))
has_skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
seq_printf(s, "%c", (has_skb) ? _h : _s);
} else {
struct wil_tx_enhanced_desc *d =
(struct wil_tx_enhanced_desc *)
&ring->va[idx].tx.enhanced;
num_of_descs = (u8)d->mac.d[2];
has_skb = ring->ctx && ring->ctx[idx].skb;
if (num_of_descs >= 1)
seq_printf(s, "%c", has_skb ? _h : _s);
else
/* num_of_descs == 0, it's a frag in a list of descs */
seq_printf(s, "%c", has_skb ? 'h' : _s);
}
}
static void wil_print_ring(struct seq_file *s, struct wil6210_priv *wil,
const char *name, struct wil_ring *ring,
char _s, char _h)
{
void __iomem *x;
u32 v;
seq_printf(s, "RING %s = {\n", name);
seq_printf(s, " pa = %pad\n", &ring->pa);
seq_printf(s, " va = 0x%p\n", ring->va);
seq_printf(s, " size = %d\n", ring->size);
if (wil->use_enhanced_dma_hw && ring->is_rx)
seq_printf(s, " swtail = %u\n", *ring->edma_rx_swtail.va);
else
seq_printf(s, " swtail = %d\n", ring->swtail);
seq_printf(s, " swhead = %d\n", ring->swhead);
if (wil->use_enhanced_dma_hw) {
int ring_id = ring->is_rx ?
WIL_RX_DESC_RING_ID : ring - wil->ring_tx;
/* SUBQ_CONS is a table of 32 entries, one for each Q pair.
* lower 16bits are for even ring_id and upper 16bits are for
* odd ring_id
*/
x = wmi_addr(wil, RGF_DMA_SCM_SUBQ_CONS + 4 * (ring_id / 2));
v = readl_relaxed(x);
v = (ring_id % 2 ? (v >> 16) : (v & 0xffff));
seq_printf(s, " hwhead = %u\n", v);
}
seq_printf(s, " hwtail = [0x%08x] -> ", ring->hwtail);
x = wmi_addr(wil, ring->hwtail);
if (x) {
v = readl(x);
seq_printf(s, "0x%08x = %d\n", v, v);
} else {
seq_puts(s, "???\n");
}
if (ring->va && (ring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
uint i;
for (i = 0; i < ring->size; i++) {
if ((i % 128) == 0 && i != 0)
seq_puts(s, "\n");
if (wil->use_enhanced_dma_hw) {
wil_print_desc_edma(s, wil, ring, _s, _h, i);
} else {
volatile struct vring_tx_desc *d =
&ring->va[i].tx.legacy;
seq_printf(s, "%c", (d->dma.status & BIT(0)) ?
_s : (ring->ctx[i].skb ? _h : 'h'));
}
}
seq_puts(s, "\n");
}
seq_puts(s, "}\n");
}
static int ring_show(struct seq_file *s, void *data)
{
uint i;
struct wil6210_priv *wil = s->private;
wil_print_ring(s, wil, "rx", &wil->ring_rx, 'S', '_');
for (i = 0; i < ARRAY_SIZE(wil->ring_tx); i++) {
struct wil_ring *ring = &wil->ring_tx[i];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
if (ring->va) {
int cid = wil->ring2cid_tid[i][0];
int tid = wil->ring2cid_tid[i][1];
u32 swhead = ring->swhead;
u32 swtail = ring->swtail;
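/* ring occupancy: entries between swtail and swhead modulo
 * ring size; one slot is always kept empty so a full ring is
 * distinguishable from an empty one
 */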
int used = (ring->size + swhead - swtail)
% ring->size;
int avail = ring->size - used - 1;
char name[10];
char sidle[10];
/* performance monitoring */
cycles_t now = get_cycles();
uint64_t idle = txdata->idle * 100;
uint64_t total = now - txdata->begin;
if (total != 0) {
do_div(idle, total);
snprintf(sidle, sizeof(sidle), "%3d%%",
(int)idle);
} else {
snprintf(sidle, sizeof(sidle), "N/A");
}
txdata->begin = now;
txdata->idle = 0ULL;
snprintf(name, sizeof(name), "tx_%2d", i);
if (cid < wil->max_assoc_sta)
seq_printf(s,
"\n%pM CID %d TID %d 1x%s BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
wil->sta[cid].addr, cid, tid,
txdata->dot1x_open ? "+" : "-",
txdata->agg_wsize,
txdata->agg_timeout,
txdata->agg_amsdu ? "+" : "-",
used, avail, sidle);
else
seq_printf(s,
"\nBroadcast 1x%s [%3d|%3d] idle %s\n",
txdata->dot1x_open ? "+" : "-",
used, avail, sidle);
wil_print_ring(s, wil, name, ring, '_', 'H');
}
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ring);
static void wil_print_sring(struct seq_file *s, struct wil6210_priv *wil,
struct wil_status_ring *sring)
{
void __iomem *x;
int sring_idx = sring - wil->srings;
u32 v;
seq_printf(s, "Status Ring %s [ %d ] = {\n",
sring->is_rx ? "RX" : "TX", sring_idx);
seq_printf(s, " pa = %pad\n", &sring->pa);
seq_printf(s, " va = 0x%pK\n", sring->va);
seq_printf(s, " size = %d\n", sring->size);
seq_printf(s, " elem_size = %zu\n", sring->elem_size);
seq_printf(s, " swhead = %d\n", sring->swhead);
if (wil->use_enhanced_dma_hw) {
/* COMPQ_PROD is a table of 32 entries, one for each Q pair.
* lower 16bits are for even ring_id and upper 16bits are for
* odd ring_id
*/
x = wmi_addr(wil, RGF_DMA_SCM_COMPQ_PROD + 4 * (sring_idx / 2));
v = readl_relaxed(x);
v = (sring_idx % 2 ? (v >> 16) : (v & 0xffff));
seq_printf(s, " hwhead = %u\n", v);
}
seq_printf(s, " hwtail = [0x%08x] -> ", sring->hwtail);
x = wmi_addr(wil, sring->hwtail);
if (x) {
v = readl_relaxed(x);
seq_printf(s, "0x%08x = %d\n", v, v);
} else {
seq_puts(s, "???\n");
}
seq_printf(s, " desc_rdy_pol = %d\n", sring->desc_rdy_pol);
seq_printf(s, " invalid_buff_id_cnt = %d\n",
sring->invalid_buff_id_cnt);
if (sring->va && (sring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
uint i;
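/* one character per status message: bit 31 of the first dword is
 * the descriptor-ready bit; the entry at swhead prints uppercase
 */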
for (i = 0; i < sring->size; i++) {
u32 *sdword_0 =
(u32 *)(sring->va + (sring->elem_size * i));
if ((i % 128) == 0 && i != 0)
seq_puts(s, "\n");
if (i == sring->swhead)
seq_printf(s, "%c", (*sdword_0 & BIT(31)) ?
'X' : 'x');
else
seq_printf(s, "%c", (*sdword_0 & BIT(31)) ?
'1' : '0');
}
seq_puts(s, "\n");
}
seq_puts(s, "}\n");
}
static int srings_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
int i = 0;
for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++)
if (wil->srings[i].va)
wil_print_sring(s, wil, &wil->srings[i]);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(srings);
static void wil_seq_hexdump(struct seq_file *s, void *p, int len,
const char *prefix)
{
seq_hex_dump(s, prefix, DUMP_PREFIX_NONE, 16, 1, p, len, false);
}
static void wil_print_mbox_ring(struct seq_file *s, const char *prefix,
void __iomem *off)
{
struct wil6210_priv *wil = s->private;
struct wil6210_mbox_ring r;
int rsize;
uint i;
wil_halp_vote(wil);
if (wil_mem_access_lock(wil)) {
wil_halp_unvote(wil);
return;
}
wil_memcpy_fromio_32(&r, off, sizeof(r));
wil_mbox_ring_le2cpus(&r);
/*
 * We just read a memory block from the NIC. This memory may be
 * garbage; check validity before using it.
 */
rsize = r.size / sizeof(struct wil6210_mbox_ring_desc);
seq_printf(s, "ring %s = {\n", prefix);
seq_printf(s, " base = 0x%08x\n", r.base);
seq_printf(s, " size = 0x%04x bytes -> %d entries\n", r.size, rsize);
seq_printf(s, " tail = 0x%08x\n", r.tail);
seq_printf(s, " head = 0x%08x\n", r.head);
seq_printf(s, " entry size = %d\n", r.entry_size);
if (r.size % sizeof(struct wil6210_mbox_ring_desc)) {
seq_printf(s, " ??? size is not multiple of %zd, garbage?\n",
sizeof(struct wil6210_mbox_ring_desc));
goto out;
}
if (!wmi_addr(wil, r.base) ||
!wmi_addr(wil, r.tail) ||
!wmi_addr(wil, r.head)) {
seq_puts(s, " ??? pointers are garbage?\n");
goto out;
}
for (i = 0; i < rsize; i++) {
struct wil6210_mbox_ring_desc d;
struct wil6210_mbox_hdr hdr;
size_t delta = i * sizeof(d);
void __iomem *x = wil->csr + HOSTADDR(r.base) + delta;
wil_memcpy_fromio_32(&d, x, sizeof(d));
seq_printf(s, " [%2x] %s %s%s 0x%08x", i,
d.sync ? "F" : "E",
(r.tail - r.base == delta) ? "t" : " ",
(r.head - r.base == delta) ? "h" : " ",
le32_to_cpu(d.addr));
if (0 == wmi_read_hdr(wil, d.addr, &hdr)) {
u16 len = le16_to_cpu(hdr.len);
seq_printf(s, " -> %04x %04x %04x %02x\n",
le16_to_cpu(hdr.seq), len,
le16_to_cpu(hdr.type), hdr.flags);
if (len <= MAX_MBOXITEM_SIZE) {
unsigned char databuf[MAX_MBOXITEM_SIZE];
void __iomem *src = wmi_buffer(wil, d.addr) +
sizeof(struct wil6210_mbox_hdr);
/*
 * No need to check @src for validity -
 * we already validated @d.addr while
 * reading the header
 */
wil_memcpy_fromio_32(databuf, src, len);
wil_seq_hexdump(s, databuf, len, " : ");
}
} else {
seq_puts(s, "\n");
}
}
out:
seq_puts(s, "}\n");
wil_mem_access_unlock(wil);
wil_halp_unvote(wil);
}
static int mbox_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
int ret;
ret = wil_pm_runtime_get(wil);
if (ret < 0)
return ret;
wil_print_mbox_ring(s, "tx", wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, tx));
wil_print_mbox_ring(s, "rx", wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, rx));
wil_pm_runtime_put(wil);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(mbox);
static int wil_debugfs_iomem_x32_set(void *data, u64 val)
{
struct wil_debugfs_iomem_data *d = (struct
wil_debugfs_iomem_data *)data;
struct wil6210_priv *wil = d->wil;
int ret;
ret = wil_pm_runtime_get(wil);
if (ret < 0)
return ret;
writel_relaxed(val, (void __iomem *)d->offset);
wmb(); /* make sure write propagated to HW */
wil_pm_runtime_put(wil);
return 0;
}
static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
{
struct wil_debugfs_iomem_data *d = (struct
wil_debugfs_iomem_data *)data;
struct wil6210_priv *wil = d->wil;
int ret;
ret = wil_pm_runtime_get(wil);
if (ret < 0)
return ret;
*val = readl((void __iomem *)d->offset);
wil_pm_runtime_put(wil);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
wil_debugfs_iomem_x32_set, "0x%08llx\n");
static void wil_debugfs_create_iomem_x32(const char *name, umode_t mode,
struct dentry *parent, void *value,
struct wil6210_priv *wil)
{
struct wil_debugfs_iomem_data *data = &wil->dbg_data.data_arr[
wil->dbg_data.iomem_data_count];
data->wil = wil;
data->offset = value;
debugfs_create_file_unsafe(name, mode, parent, data, &fops_iomem_x32);
wil->dbg_data.iomem_data_count++;
}
static int wil_debugfs_ulong_set(void *data, u64 val)
{
*(ulong *)data = val;
return 0;
}
static int wil_debugfs_ulong_get(void *data, u64 *val)
{
*val = *(ulong *)data;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
wil_debugfs_ulong_set, "0x%llx\n");
/**
* wil6210_debugfs_init_offset - create set of debugfs files
* @wil: driver's context, used for printing
* @dbg: directory on the debugfs, where files will be created
* @base: base address used in address calculation
* @tbl: table with file descriptions. Should be terminated with empty element.
*
* Creates files accordingly to the @tbl.
*/
static void wil6210_debugfs_init_offset(struct wil6210_priv *wil,
struct dentry *dbg, void *base,
const struct dbg_off * const tbl)
{
int i;
for (i = 0; tbl[i].name; i++) {
switch (tbl[i].type) {
case doff_u32:
debugfs_create_u32(tbl[i].name, tbl[i].mode, dbg,
base + tbl[i].off);
break;
case doff_x32:
debugfs_create_x32(tbl[i].name, tbl[i].mode, dbg,
base + tbl[i].off);
break;
case doff_ulong:
debugfs_create_file_unsafe(tbl[i].name, tbl[i].mode,
dbg, base + tbl[i].off,
&wil_fops_ulong);
break;
case doff_io32:
wil_debugfs_create_iomem_x32(tbl[i].name, tbl[i].mode,
dbg, base + tbl[i].off,
wil);
break;
case doff_u8:
debugfs_create_u8(tbl[i].name, tbl[i].mode, dbg,
base + tbl[i].off);
break;
}
}
}
static const struct dbg_off isr_off[] = {
{"ICC", 0644, offsetof(struct RGF_ICR, ICC), doff_io32},
{"ICR", 0644, offsetof(struct RGF_ICR, ICR), doff_io32},
{"ICM", 0644, offsetof(struct RGF_ICR, ICM), doff_io32},
{"ICS", 0244, offsetof(struct RGF_ICR, ICS), doff_io32},
{"IMV", 0644, offsetof(struct RGF_ICR, IMV), doff_io32},
{"IMS", 0244, offsetof(struct RGF_ICR, IMS), doff_io32},
{"IMC", 0244, offsetof(struct RGF_ICR, IMC), doff_io32},
{},
};
static void wil6210_debugfs_create_ISR(struct wil6210_priv *wil,
const char *name, struct dentry *parent,
u32 off)
{
struct dentry *d = debugfs_create_dir(name, parent);
wil6210_debugfs_init_offset(wil, d, (void * __force)wil->csr + off,
isr_off);
}
static const struct dbg_off pseudo_isr_off[] = {
{"CAUSE", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE), doff_io32},
{"MASK_SW", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW), doff_io32},
{"MASK_FW", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW), doff_io32},
{},
};
static void wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil,
struct dentry *parent)
{
struct dentry *d = debugfs_create_dir("PSEUDO_ISR", parent);
wil6210_debugfs_init_offset(wil, d, (void * __force)wil->csr,
pseudo_isr_off);
}
static const struct dbg_off lgc_itr_cnt_off[] = {
{"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32},
{"DATA", 0644, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32},
{"CTL", 0644, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32},
{},
};
static const struct dbg_off tx_itr_cnt_off[] = {
{"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH),
doff_io32},
{"DATA", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_DATA),
doff_io32},
{"CTL", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL),
doff_io32},
{"IDL_TRSH", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_TRSH),
doff_io32},
{"IDL_DATA", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_DATA),
doff_io32},
{"IDL_CTL", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_CTL),
doff_io32},
{},
};
static const struct dbg_off rx_itr_cnt_off[] = {
{"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH),
doff_io32},
{"DATA", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_DATA),
doff_io32},
{"CTL", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL),
doff_io32},
{"IDL_TRSH", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_TRSH),
doff_io32},
{"IDL_DATA", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_DATA),
doff_io32},
{"IDL_CTL", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_CTL),
doff_io32},
{},
};
static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil,
struct dentry *parent)
{
struct dentry *d, *dtx, *drx;
d = debugfs_create_dir("ITR_CNT", parent);
dtx = debugfs_create_dir("TX", d);
drx = debugfs_create_dir("RX", d);
wil6210_debugfs_init_offset(wil, d, (void * __force)wil->csr,
lgc_itr_cnt_off);
wil6210_debugfs_init_offset(wil, dtx, (void * __force)wil->csr,
tx_itr_cnt_off);
wil6210_debugfs_init_offset(wil, drx, (void * __force)wil->csr,
rx_itr_cnt_off);
return 0;
}
static int memread_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
void __iomem *a;
int ret;
ret = wil_pm_runtime_get(wil);
if (ret < 0)
return ret;
ret = wil_mem_access_lock(wil);
if (ret) {
wil_pm_runtime_put(wil);
return ret;
}
a = wmi_buffer(wil, cpu_to_le32(mem_addr));
if (a)
seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, readl(a));
else
seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
wil_mem_access_unlock(wil);
wil_pm_runtime_put(wil);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(memread);
static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
enum { max_count = 4096 };
struct wil_blob_wrapper *wil_blob = file->private_data;
struct wil6210_priv *wil = wil_blob->wil;
loff_t aligned_pos, pos = *ppos;
size_t available = wil_blob->blob.size;
void *buf;
size_t unaligned_bytes, aligned_count, ret;
int rc;
if (pos < 0)
return -EINVAL;
if (pos >= available || !count)
return 0;
if (count > available - pos)
count = available - pos;
if (count > max_count)
count = max_count;
/* set pos to 4 bytes aligned */
unaligned_bytes = pos % 4;
aligned_pos = pos - unaligned_bytes;
aligned_count = count + unaligned_bytes;
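/* device memory is copied in 32-bit units (wil_memcpy_fromio_32), so
 * widen the window to 4-byte alignment and return only the bytes the
 * caller asked for
 */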
buf = kmalloc(aligned_count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
rc = wil_pm_runtime_get(wil);
if (rc < 0) {
kfree(buf);
return rc;
}
rc = wil_mem_access_lock(wil);
if (rc) {
kfree(buf);
wil_pm_runtime_put(wil);
return rc;
}
wil_memcpy_fromio_32(buf, (const void __iomem *)
wil_blob->blob.data + aligned_pos, aligned_count);
ret = copy_to_user(user_buf, buf + unaligned_bytes, count);
wil_mem_access_unlock(wil);
wil_pm_runtime_put(wil);
kfree(buf);
if (ret == count)
return -EFAULT;
count -= ret;
*ppos = pos + count;
return count;
}
static const struct file_operations fops_ioblob = {
.read = wil_read_file_ioblob,
.open = simple_open,
.llseek = default_llseek,
};
static
struct dentry *wil_debugfs_create_ioblob(const char *name,
umode_t mode,
struct dentry *parent,
struct wil_blob_wrapper *wil_blob)
{
return debugfs_create_file(name, mode, parent, wil_blob, &fops_ioblob);
}
/*---write channel 1..4 to rxon for it, 0 to rxoff---*/
static ssize_t wil_write_file_rxon(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
int rc;
long channel;
bool on;
char *kbuf = memdup_user_nul(buf, len);
if (IS_ERR(kbuf))
return PTR_ERR(kbuf);
rc = kstrtol(kbuf, 0, &channel);
kfree(kbuf);
if (rc)
return rc;
if ((channel < 0) || (channel > 4)) {
wil_err(wil, "Invalid channel %ld\n", channel);
return -EINVAL;
}
on = !!channel;
if (on) {
rc = wmi_set_channel(wil, (int)channel);
if (rc)
return rc;
}
rc = wmi_rxon(wil, on);
if (rc)
return rc;
return len;
}
static const struct file_operations fops_rxon = {
.write = wil_write_file_rxon,
.open = simple_open,
};
static ssize_t wil_write_file_rbufcap(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
int val;
int rc;
rc = kstrtoint_from_user(buf, count, 0, &val);
if (rc) {
wil_err(wil, "Invalid argument\n");
return rc;
}
/* input value: negative to disable, 0 to use system default,
* 1..ring size to set descriptor threshold
*/
wil_info(wil, "%s RBUFCAP, descriptors threshold - %d\n",
val < 0 ? "Disabling" : "Enabling", val);
if (!wil->ring_rx.va || val > wil->ring_rx.size) {
wil_err(wil, "Invalid descriptors threshold, %d\n", val);
return -EINVAL;
}
rc = wmi_rbufcap_cfg(wil, val < 0 ? 0 : 1, val < 0 ? 0 : val);
if (rc) {
wil_err(wil, "RBUFCAP config failed: %d\n", rc);
return rc;
}
return count;
}
static const struct file_operations fops_rbufcap = {
.write = wil_write_file_rbufcap,
.open = simple_open,
};
/* block ack control, write:
* - "add <ringid> <agg_size> <timeout>" to trigger ADDBA
* - "del_tx <ringid> <reason>" to trigger DELBA for Tx side
* - "del_rx <CID> <TID> <reason>" to trigger DELBA for Rx side
*/
static ssize_t wil_write_back(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
int rc;
char *kbuf = kmalloc(len + 1, GFP_KERNEL);
char cmd[9];
int p1, p2, p3;
if (!kbuf)
return -ENOMEM;
rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
if (rc != len) {
kfree(kbuf);
return rc >= 0 ? -EIO : rc;
}
kbuf[len] = '\0';
rc = sscanf(kbuf, "%8s %d %d %d", cmd, &p1, &p2, &p3);
kfree(kbuf);
if (rc < 0)
return rc;
if (rc < 2)
return -EINVAL;
if ((strcmp(cmd, "add") == 0) ||
(strcmp(cmd, "del_tx") == 0)) {
struct wil_ring_tx_data *txdata;
if (p1 < 0 || p1 >= WIL6210_MAX_TX_RINGS) {
wil_err(wil, "BACK: invalid ring id %d\n", p1);
return -EINVAL;
}
txdata = &wil->ring_tx_data[p1];
if (strcmp(cmd, "add") == 0) {
if (rc < 3) {
wil_err(wil, "BACK: add require at least 2 params\n");
return -EINVAL;
}
if (rc < 4)
p3 = 0;
wmi_addba(wil, txdata->mid, p1, p2, p3);
} else {
if (rc < 3)
p2 = WLAN_REASON_QSTA_LEAVE_QBSS;
wmi_delba_tx(wil, txdata->mid, p1, p2);
}
} else if (strcmp(cmd, "del_rx") == 0) {
struct wil_sta_info *sta;
if (rc < 3) {
wil_err(wil,
"BACK: del_rx requires at least 2 params\n");
return -EINVAL;
}
if (p1 < 0 || p1 >= wil->max_assoc_sta) {
wil_err(wil, "BACK: invalid CID %d\n", p1);
return -EINVAL;
}
if (rc < 4)
p3 = WLAN_REASON_QSTA_LEAVE_QBSS;
sta = &wil->sta[p1];
wmi_delba_rx(wil, sta->mid, p1, p2, p3);
} else {
wil_err(wil, "BACK: Unrecognized command \"%s\"\n", cmd);
return -EINVAL;
}
return len;
}
static ssize_t wil_read_back(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
static const char text[] = "block ack control, write:\n"
" - \"add <ringid> <agg_size> <timeout>\" to trigger ADDBA\n"
"If missing, <timeout> defaults to 0\n"
" - \"del_tx <ringid> <reason>\" to trigger DELBA for Tx side\n"
" - \"del_rx <CID> <TID> <reason>\" to trigger DELBA for Rx side\n"
"If missing, <reason> set to \"STA_LEAVING\" (36)\n";
return simple_read_from_buffer(user_buf, count, ppos, text,
sizeof(text));
}
static const struct file_operations fops_back = {
.read = wil_read_back,
.write = wil_write_back,
.open = simple_open,
};
/* pmc control, write:
* - "alloc <num descriptors> <descriptor_size>" to allocate PMC
* - "free" to release memory allocated for PMC
*/
static ssize_t wil_write_pmccfg(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
int rc;
char *kbuf = kmalloc(len + 1, GFP_KERNEL);
char cmd[9];
int num_descs, desc_size;
if (!kbuf)
return -ENOMEM;
rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
if (rc != len) {
kfree(kbuf);
return rc >= 0 ? -EIO : rc;
}
kbuf[len] = '\0';
rc = sscanf(kbuf, "%8s %d %d", cmd, &num_descs, &desc_size);
kfree(kbuf);
if (rc < 0)
return rc;
if (rc < 1) {
wil_err(wil, "pmccfg: no params given\n");
return -EINVAL;
}
if (0 == strcmp(cmd, "alloc")) {
if (rc != 3) {
wil_err(wil, "pmccfg: alloc requires 2 params\n");
return -EINVAL;
}
wil_pmc_alloc(wil, num_descs, desc_size);
} else if (0 == strcmp(cmd, "free")) {
if (rc != 1) {
wil_err(wil, "pmccfg: free does not have any params\n");
return -EINVAL;
}
wil_pmc_free(wil, true);
} else {
wil_err(wil, "pmccfg: Unrecognized command \"%s\"\n", cmd);
return -EINVAL;
}
return len;
}
static ssize_t wil_read_pmccfg(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
char text[256];
char help[] = "pmc control, write:\n"
" - \"alloc <num descriptors> <descriptor_size>\" to allocate pmc\n"
" - \"free\" to free memory allocated for pmc\n";
snprintf(text, sizeof(text), "Last command status: %d\n\n%s",
wil_pmc_last_cmd_status(wil), help);
return simple_read_from_buffer(user_buf, count, ppos, text,
strlen(text) + 1);
}
static const struct file_operations fops_pmccfg = {
.read = wil_read_pmccfg,
.write = wil_write_pmccfg,
.open = simple_open,
};
static const struct file_operations fops_pmcdata = {
.open = simple_open,
.read = wil_pmc_read,
.llseek = wil_pmc_llseek,
};
static int wil_pmcring_seq_open(struct inode *inode, struct file *file)
{
return single_open(file, wil_pmcring_read, inode->i_private);
}
static const struct file_operations fops_pmcring = {
.open = wil_pmcring_seq_open,
.release = single_release,
.read = seq_read,
.llseek = seq_lseek,
};
/*---tx_mgmt---*/
/* Write mgmt frame to this file to send it */
static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
struct wiphy *wiphy = wil_to_wiphy(wil);
struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr;
struct cfg80211_mgmt_tx_params params;
int rc;
void *frame;
memset(&params, 0, sizeof(params));
if (!len)
return -EINVAL;
frame = memdup_user(buf, len);
if (IS_ERR(frame))
return PTR_ERR(frame);
params.buf = frame;
params.len = len;
rc = wil_cfg80211_mgmt_tx(wiphy, wdev, &params, NULL);
kfree(frame);
wil_info(wil, "-> %d\n", rc);
return len;
}
static const struct file_operations fops_txmgmt = {
.write = wil_write_file_txmgmt,
.open = simple_open,
};
/* Write WMI command (w/o mbox header) to this file to send it
* WMI starts from wil6210_mbox_hdr_wmi header
*/
static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
struct wmi_cmd_hdr *wmi;
void *cmd;
int cmdlen = len - sizeof(struct wmi_cmd_hdr);
u16 cmdid;
int rc1;
if (cmdlen < 0 || *ppos != 0)
return -EINVAL;
wmi = memdup_user(buf, len);
if (IS_ERR(wmi))
return PTR_ERR(wmi);
cmd = (cmdlen > 0) ? &wmi[1] : NULL;
cmdid = le16_to_cpu(wmi->command_id);
rc1 = wmi_send(wil, cmdid, vif->mid, cmd, cmdlen);
kfree(wmi);
wil_info(wil, "0x%04x[%d] -> %d\n", cmdid, cmdlen, rc1);
return len;
}
static const struct file_operations fops_wmi = {
.write = wil_write_file_wmi,
.open = simple_open,
};
static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
{
int i = 0;
int len = skb_headlen(skb);
void *p = skb->data;
int nr_frags = skb_shinfo(skb)->nr_frags;
seq_printf(s, " len = %d\n", len);
wil_seq_hexdump(s, p, len, " : ");
if (nr_frags) {
seq_printf(s, " nr_frags = %d\n", nr_frags);
for (i = 0; i < nr_frags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = skb_frag_size(frag);
p = skb_frag_address_safe(frag);
seq_printf(s, " [%2d] : len = %d\n", i, len);
wil_seq_hexdump(s, p, len, " : ");
}
}
}
/*---------Tx/Rx descriptor------------*/
static int txdesc_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
struct wil_ring *ring;
bool tx;
int ring_idx = dbg_ring_index;
int txdesc_idx = dbg_txdesc_index;
volatile struct vring_tx_desc *d;
volatile u32 *u;
struct sk_buff *skb;
if (wil->use_enhanced_dma_hw) {
/* RX ring index == 0 */
if (ring_idx >= WIL6210_MAX_TX_RINGS) {
seq_printf(s, "invalid ring index %d\n", ring_idx);
return 0;
}
tx = ring_idx > 0; /* desc ring 0 is reserved for RX */
} else {
/* RX ring index == WIL6210_MAX_TX_RINGS */
if (ring_idx > WIL6210_MAX_TX_RINGS) {
seq_printf(s, "invalid ring index %d\n", ring_idx);
return 0;
}
tx = (ring_idx < WIL6210_MAX_TX_RINGS);
}
ring = tx ? &wil->ring_tx[ring_idx] : &wil->ring_rx;
if (!ring->va) {
if (tx)
seq_printf(s, "No Tx[%2d] RING\n", ring_idx);
else
seq_puts(s, "No Rx RING\n");
return 0;
}
if (txdesc_idx >= ring->size) {
if (tx)
seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n",
ring_idx, txdesc_idx, ring->size);
else
seq_printf(s, "RxDesc index (%d) >= size (%d)\n",
txdesc_idx, ring->size);
return 0;
}
/* use struct vring_tx_desc for Rx as well,
* only field used, .dma.length, is the same
*/
d = &ring->va[txdesc_idx].tx.legacy;
u = (volatile u32 *)d;
skb = NULL;
if (wil->use_enhanced_dma_hw) {
if (tx) {
skb = ring->ctx ? ring->ctx[txdesc_idx].skb : NULL;
} else if (wil->rx_buff_mgmt.buff_arr) {
struct wil_rx_enhanced_desc *rx_d =
(struct wil_rx_enhanced_desc *)
&ring->va[txdesc_idx].rx.enhanced;
u16 buff_id = le16_to_cpu(rx_d->mac.buff_id);
if (!wil_val_in_range(buff_id, 0,
wil->rx_buff_mgmt.size))
seq_printf(s, "invalid buff_id %d\n", buff_id);
else
skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
}
} else {
skb = ring->ctx[txdesc_idx].skb;
}
if (tx)
seq_printf(s, "Tx[%2d][%3d] = {\n", ring_idx,
txdesc_idx);
else
seq_printf(s, "Rx[%3d] = {\n", txdesc_idx);
seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
u[0], u[1], u[2], u[3]);
seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
u[4], u[5], u[6], u[7]);
seq_printf(s, " SKB = 0x%p\n", skb);
if (skb) {
skb_get(skb);
wil_seq_print_skb(s, skb);
kfree_skb(skb);
}
seq_puts(s, "}\n");
return 0;
}
DEFINE_SHOW_ATTRIBUTE(txdesc);
/*---------Tx/Rx status message------------*/
static int status_msg_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
int sring_idx = dbg_sring_index;
struct wil_status_ring *sring;
bool tx;
u32 status_msg_idx = dbg_status_msg_index;
u32 *u;
if (sring_idx >= WIL6210_MAX_STATUS_RINGS) {
seq_printf(s, "invalid status ring index %d\n", sring_idx);
return 0;
}
sring = &wil->srings[sring_idx];
tx = !sring->is_rx;
if (!sring->va) {
seq_printf(s, "No %cX status ring\n", tx ? 'T' : 'R');
return 0;
}
if (status_msg_idx >= sring->size) {
seq_printf(s, "%cxDesc index (%d) >= size (%d)\n",
tx ? 'T' : 'R', status_msg_idx, sring->size);
return 0;
}
u = sring->va + (sring->elem_size * status_msg_idx);
seq_printf(s, "%cx[%d][%3d] = {\n",
tx ? 'T' : 'R', sring_idx, status_msg_idx);
seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x\n",
u[0], u[1], u[2], u[3]);
if (!tx && !wil->use_compressed_rx_status)
seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x\n",
u[4], u[5], u[6], u[7]);
seq_puts(s, "}\n");
return 0;
}
DEFINE_SHOW_ATTRIBUTE(status_msg);
static int wil_print_rx_buff(struct seq_file *s, struct list_head *lh)
{
struct wil_rx_buff *it;
int i = 0;
list_for_each_entry(it, lh, list) {
if ((i % 16) == 0 && i != 0)
seq_puts(s, "\n ");
seq_printf(s, "[%4d] ", it->id);
i++;
}
seq_printf(s, "\nNumber of buffers: %u\n", i);
return i;
}
static int rx_buff_mgmt_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
struct wil_rx_buff_mgmt *rbm = &wil->rx_buff_mgmt;
int num_active;
int num_free;
if (!rbm->buff_arr)
return -EINVAL;
seq_printf(s, " size = %zu\n", rbm->size);
seq_printf(s, " free_list_empty_cnt = %lu\n",
rbm->free_list_empty_cnt);
/* Print active list */
seq_puts(s, " Active list:\n");
num_active = wil_print_rx_buff(s, &rbm->active);
seq_puts(s, "\n Free list:\n");
num_free = wil_print_rx_buff(s, &rbm->free);
seq_printf(s, " Total number of buffers: %u\n",
num_active + num_free);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(rx_buff_mgmt);
/*---------beamforming------------*/
static char *wil_bfstatus_str(u32 status)
{
switch (status) {
case 0:
return "Failed";
case 1:
return "OK";
case 2:
return "Retrying";
default:
return "??";
}
}
static bool is_all_zeros(void * const x_, size_t sz)
{
/* if reply is all-0, ignore this CID */
u32 *x = x_;
int n;
for (n = 0; n < sz / sizeof(*x); n++)
if (x[n])
return false;
return true;
}
static int bf_show(struct seq_file *s, void *data)
{
int rc;
int i;
struct wil6210_priv *wil = s->private;
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
struct wmi_notify_req_cmd cmd = {
.interval_usec = 0,
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_notify_req_done_event evt;
} __packed reply;
memset(&reply, 0, sizeof(reply));
for (i = 0; i < wil->max_assoc_sta; i++) {
u32 status;
u8 bf_mcs;
cmd.cid = i;
rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, vif->mid,
&cmd, sizeof(cmd),
WMI_NOTIFY_REQ_DONE_EVENTID, &reply,
sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
/* if reply is all-0, ignore this CID */
if (rc || is_all_zeros(&reply.evt, sizeof(reply.evt)))
continue;
status = le32_to_cpu(reply.evt.status);
bf_mcs = le16_to_cpu(reply.evt.bf_mcs);
seq_printf(s, "CID %d {\n"
" TSF = 0x%016llx\n"
" TxMCS = %s TxTpt = %4d\n"
" SQI = %4d\n"
" RSSI = %4d\n"
" Status = 0x%08x %s\n"
" Sectors(rx:tx) my %2d:%2d peer %2d:%2d\n"
" Goodput(rx:tx) %4d:%4d\n"
"}\n",
i,
le64_to_cpu(reply.evt.tsf),
WIL_EXTENDED_MCS_CHECK(bf_mcs),
le32_to_cpu(reply.evt.tx_tpt),
reply.evt.sqi,
reply.evt.rssi,
status, wil_bfstatus_str(status),
le16_to_cpu(reply.evt.my_rx_sector),
le16_to_cpu(reply.evt.my_tx_sector),
le16_to_cpu(reply.evt.other_rx_sector),
le16_to_cpu(reply.evt.other_tx_sector),
le32_to_cpu(reply.evt.rx_goodput),
le32_to_cpu(reply.evt.tx_goodput));
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(bf);
/*---------temp------------*/
static void print_temp(struct seq_file *s, const char *prefix, s32 t)
{
switch (t) {
case 0:
case WMI_INVALID_TEMPERATURE:
seq_printf(s, "%s N/A\n", prefix);
break;
default:
seq_printf(s, "%s %s%d.%03d\n", prefix, (t < 0 ? "-" : ""),
abs(t / 1000), abs(t % 1000));
break;
}
}
static int temp_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
int rc, i;
if (test_bit(WMI_FW_CAPABILITY_TEMPERATURE_ALL_RF,
wil->fw_capabilities)) {
struct wmi_temp_sense_all_done_event sense_all_evt;
wil_dbg_misc(wil,
"WMI_FW_CAPABILITY_TEMPERATURE_ALL_RF is supported");
rc = wmi_get_all_temperatures(wil, &sense_all_evt);
if (rc) {
seq_puts(s, "Failed\n");
return 0;
}
print_temp(s, "T_mac =",
le32_to_cpu(sense_all_evt.baseband_t1000));
seq_printf(s, "Connected RFs [0x%08x]\n",
sense_all_evt.rf_bitmap);
for (i = 0; i < WMI_MAX_XIF_PORTS_NUM; i++) {
seq_printf(s, "RF[%d] = ", i);
print_temp(s, "",
le32_to_cpu(sense_all_evt.rf_t1000[i]));
}
} else {
s32 t_m, t_r;
wil_dbg_misc(wil,
"WMI_FW_CAPABILITY_TEMPERATURE_ALL_RF is not supported");
rc = wmi_get_temperature(wil, &t_m, &t_r);
if (rc) {
seq_puts(s, "Failed\n");
return 0;
}
print_temp(s, "T_mac =", t_m);
print_temp(s, "T_radio =", t_r);
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(temp);
/*---------link------------*/
static int link_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
struct station_info *sinfo;
int i, rc = 0;
sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
if (!sinfo)
return -ENOMEM;
for (i = 0; i < wil->max_assoc_sta; i++) {
struct wil_sta_info *p = &wil->sta[i];
char *status = "unknown";
struct wil6210_vif *vif;
u8 mid;
switch (p->status) {
case wil_sta_unused:
status = "unused ";
break;
case wil_sta_conn_pending:
status = "pending ";
break;
case wil_sta_connected:
status = "connected";
break;
}
mid = (p->status != wil_sta_unused) ? p->mid : U8_MAX;
seq_printf(s, "[%d][MID %d] %pM %s\n",
i, mid, p->addr, status);
if (p->status != wil_sta_connected)
continue;
vif = (mid < GET_MAX_VIFS(wil)) ? wil->vifs[mid] : NULL;
if (vif) {
rc = wil_cid_fill_sinfo(vif, i, sinfo);
if (rc)
goto out;
seq_printf(s, " Tx_mcs = %s\n",
WIL_EXTENDED_MCS_CHECK(sinfo->txrate.mcs));
seq_printf(s, " Rx_mcs = %s\n",
WIL_EXTENDED_MCS_CHECK(sinfo->rxrate.mcs));
seq_printf(s, " SQ = %d\n", sinfo->signal);
} else {
seq_puts(s, " INVALID MID\n");
}
}
out:
kfree(sinfo);
return rc;
}
DEFINE_SHOW_ATTRIBUTE(link);
/*---------info------------*/
static int info_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
struct net_device *ndev = wil->main_ndev;
int is_ac = power_supply_is_system_supplied();
int rx = atomic_xchg(&wil->isr_count_rx, 0);
int tx = atomic_xchg(&wil->isr_count_tx, 0);
static ulong rxf_old, txf_old;
ulong rxf = ndev->stats.rx_packets;
ulong txf = ndev->stats.tx_packets;
unsigned int i;
/* >0 : AC; 0 : battery; <0 : error */
seq_printf(s, "AC powered : %d\n", is_ac);
seq_printf(s, "Rx irqs:packets : %8d : %8ld\n", rx, rxf - rxf_old);
seq_printf(s, "Tx irqs:packets : %8d : %8ld\n", tx, txf - txf_old);
rxf_old = rxf;
txf_old = txf;
#define CHECK_QSTATE(x) (state & BIT(__QUEUE_STATE_ ## x)) ? \
" " __stringify(x) : ""
for (i = 0; i < ndev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(ndev, i);
unsigned long state = txq->state;
seq_printf(s, "Tx queue[%i] state : 0x%lx%s%s%s\n", i, state,
CHECK_QSTATE(DRV_XOFF),
CHECK_QSTATE(STACK_XOFF),
CHECK_QSTATE(FROZEN)
);
}
#undef CHECK_QSTATE
return 0;
}
DEFINE_SHOW_ATTRIBUTE(info);
/*---------recovery------------*/
/* mode = [manual|auto]
* state = [idle|pending|running]
*/
static ssize_t wil_read_file_recovery(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
char buf[80];
int n;
static const char * const sstate[] = {"idle", "pending", "running"};
n = snprintf(buf, sizeof(buf), "mode = %s\nstate = %s\n",
no_fw_recovery ? "manual" : "auto",
sstate[wil->recovery_state]);
n = min_t(int, n, sizeof(buf));
return simple_read_from_buffer(user_buf, count, ppos,
buf, n);
}
static ssize_t wil_write_file_recovery(struct file *file,
const char __user *buf_,
size_t count, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
static const char run_command[] = "run";
char buf[sizeof(run_command) + 1]; /* to detect "runx" */
ssize_t rc;
if (wil->recovery_state != fw_recovery_pending) {
wil_err(wil, "No recovery pending\n");
return -EINVAL;
}
if (*ppos != 0) {
wil_err(wil, "Offset [%d]\n", (int)*ppos);
return -EINVAL;
}
if (count > sizeof(buf)) {
wil_err(wil, "Input too long, len = %d\n", (int)count);
return -EINVAL;
}
rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, buf_, count);
if (rc < 0)
return rc;
buf[rc] = '\0';
if (0 == strcmp(buf, run_command))
wil_set_recovery_state(wil, fw_recovery_running);
else
wil_err(wil, "Bad recovery command \"%s\"\n", buf);
return rc;
}
static const struct file_operations fops_recovery = {
.read = wil_read_file_recovery,
.write = wil_write_file_recovery,
.open = simple_open,
};
/*---------Station matrix------------*/
static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
{
int i;
u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size;
unsigned long long drop_dup = r->drop_dup, drop_old = r->drop_old;
unsigned long long drop_dup_mcast = r->drop_dup_mcast;
seq_printf(s, "([%2d]) 0x%03x [", r->buf_size, r->head_seq_num);
for (i = 0; i < r->buf_size; i++) {
if (i == index)
seq_printf(s, "%c", r->reorder_buf[i] ? 'O' : '|');
else
seq_printf(s, "%c", r->reorder_buf[i] ? '*' : '_');
}
seq_printf(s,
"] total %llu drop %llu (dup %llu + old %llu + dup mcast %llu) last 0x%03x\n",
r->total, drop_dup + drop_old + drop_dup_mcast, drop_dup,
drop_old, drop_dup_mcast, r->ssn_last_drop);
}
static void wil_print_rxtid_crypto(struct seq_file *s, int tid,
struct wil_tid_crypto_rx *c)
{
int i;
for (i = 0; i < 4; i++) {
struct wil_tid_crypto_rx_single *cc = &c->key_id[i];
if (cc->key_set)
goto has_keys;
}
return;
has_keys:
if (tid < WIL_STA_TID_NUM)
seq_printf(s, " [%2d] PN", tid);
else
seq_puts(s, " [GR] PN");
for (i = 0; i < 4; i++) {
struct wil_tid_crypto_rx_single *cc = &c->key_id[i];
seq_printf(s, " [%i%s]%6phN", i, cc->key_set ? "+" : "-",
cc->pn);
}
seq_puts(s, "\n");
}
static int sta_show(struct seq_file *s, void *data)
__acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
{
struct wil6210_priv *wil = s->private;
int i, tid, mcs;
for (i = 0; i < wil->max_assoc_sta; i++) {
struct wil_sta_info *p = &wil->sta[i];
char *status = "unknown";
u8 aid = 0;
u8 mid;
bool sta_connected = false;
switch (p->status) {
case wil_sta_unused:
status = "unused ";
break;
case wil_sta_conn_pending:
status = "pending ";
break;
case wil_sta_connected:
status = "connected";
aid = p->aid;
break;
}
mid = (p->status != wil_sta_unused) ? p->mid : U8_MAX;
if (mid < GET_MAX_VIFS(wil)) {
struct wil6210_vif *vif = wil->vifs[mid];
if (vif->wdev.iftype == NL80211_IFTYPE_STATION &&
p->status == wil_sta_connected)
sta_connected = true;
}
/* print roam counter only for connected stations */
if (sta_connected)
seq_printf(s, "[%d] %pM connected (roam counter %d) MID %d AID %d\n",
i, p->addr, p->stats.ft_roams, mid, aid);
else
seq_printf(s, "[%d] %pM %s MID %d AID %d\n", i,
p->addr, status, mid, aid);
if (p->status == wil_sta_connected) {
spin_lock_bh(&p->tid_rx_lock);
for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
struct wil_tid_ampdu_rx *r = p->tid_rx[tid];
struct wil_tid_crypto_rx *c =
&p->tid_crypto_rx[tid];
if (r) {
seq_printf(s, " [%2d] ", tid);
wil_print_rxtid(s, r);
}
wil_print_rxtid_crypto(s, tid, c);
}
wil_print_rxtid_crypto(s, WIL_STA_TID_NUM,
&p->group_crypto_rx);
spin_unlock_bh(&p->tid_rx_lock);
seq_printf(s,
"Rx invalid frame: non-data %lu, short %lu, large %lu, replay %lu\n",
p->stats.rx_non_data_frame,
p->stats.rx_short_frame,
p->stats.rx_large_frame,
p->stats.rx_replay);
seq_printf(s,
"mic error %lu, key error %lu, amsdu error %lu, csum error %lu\n",
p->stats.rx_mic_error,
p->stats.rx_key_error,
p->stats.rx_amsdu_error,
p->stats.rx_csum_err);
seq_puts(s, "Rx/MCS:");
for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs);
mcs++)
seq_printf(s, " %lld",
p->stats.rx_per_mcs[mcs]);
seq_puts(s, "\n");
}
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(sta);
static int mids_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
struct wil6210_vif *vif;
struct net_device *ndev;
int i;
mutex_lock(&wil->vif_mutex);
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (vif) {
ndev = vif_to_ndev(vif);
seq_printf(s, "[%d] %pM %s\n", i, ndev->dev_addr,
ndev->name);
} else {
seq_printf(s, "[%d] unused\n", i);
}
}
mutex_unlock(&wil->vif_mutex);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(mids);
static int wil_tx_latency_debugfs_show(struct seq_file *s, void *data)
__acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
{
struct wil6210_priv *wil = s->private;
int i, bin;
for (i = 0; i < wil->max_assoc_sta; i++) {
struct wil_sta_info *p = &wil->sta[i];
char *status = "unknown";
u8 aid = 0;
u8 mid;
if (!p->tx_latency_bins)
continue;
switch (p->status) {
case wil_sta_unused:
status = "unused ";
break;
case wil_sta_conn_pending:
status = "pending ";
break;
case wil_sta_connected:
status = "connected";
aid = p->aid;
break;
}
mid = (p->status != wil_sta_unused) ? p->mid : U8_MAX;
seq_printf(s, "[%d] %pM %s MID %d AID %d\n", i, p->addr, status,
mid, aid);
if (p->status == wil_sta_connected) {
u64 num_packets = 0;
u64 tx_latency_avg = p->stats.tx_latency_total_us;
seq_puts(s, "Tx/Latency bin:");
for (bin = 0; bin < WIL_NUM_LATENCY_BINS; bin++) {
seq_printf(s, " %lld",
p->tx_latency_bins[bin]);
num_packets += p->tx_latency_bins[bin];
}
seq_puts(s, "\n");
if (!num_packets)
continue;
do_div(tx_latency_avg, num_packets);
seq_printf(s, "Tx/Latency min/avg/max (us): %d/%lld/%d",
p->stats.tx_latency_min_us,
tx_latency_avg,
p->stats.tx_latency_max_us);
seq_puts(s, "\n");
}
}
return 0;
}
static int wil_tx_latency_seq_open(struct inode *inode, struct file *file)
{
return single_open(file, wil_tx_latency_debugfs_show,
inode->i_private);
}
static ssize_t wil_tx_latency_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct wil6210_priv *wil = s->private;
int val, rc, i;
bool enable;
rc = kstrtoint_from_user(buf, len, 0, &val);
if (rc) {
wil_err(wil, "Invalid argument\n");
return rc;
}
if (val == 1)
/* default resolution */
val = 500;
if (val && (val < 50 || val > 1000)) {
wil_err(wil, "Invalid resolution %d\n", val);
return -EINVAL;
}
enable = !!val;
if (wil->tx_latency == enable)
return len;
wil_info(wil, "%s TX latency measurements (resolution %dusec)\n",
enable ? "Enabling" : "Disabling", val);
if (enable) {
size_t sz = sizeof(u64) * WIL_NUM_LATENCY_BINS;
wil->tx_latency_res = val;
for (i = 0; i < wil->max_assoc_sta; i++) {
struct wil_sta_info *sta = &wil->sta[i];
kfree(sta->tx_latency_bins);
sta->tx_latency_bins = kzalloc(sz, GFP_KERNEL);
if (!sta->tx_latency_bins)
return -ENOMEM;
sta->stats.tx_latency_min_us = U32_MAX;
sta->stats.tx_latency_max_us = 0;
sta->stats.tx_latency_total_us = 0;
}
}
wil->tx_latency = enable;
return len;
}
static const struct file_operations fops_tx_latency = {
.open = wil_tx_latency_seq_open,
.release = single_release,
.read = seq_read,
.write = wil_tx_latency_write,
.llseek = seq_lseek,
};
static void wil_link_stats_print_basic(struct wil6210_vif *vif,
struct seq_file *s,
struct wmi_link_stats_basic *basic)
{
char per[5] = "?";
if (basic->per_average != 0xff)
snprintf(per, sizeof(per), "%d%%", basic->per_average);
seq_printf(s, "CID %d {\n"
"\tTxMCS %s TxTpt %d\n"
"\tGoodput(rx:tx) %d:%d\n"
"\tRxBcastFrames %d\n"
"\tRSSI %d SQI %d SNR %d PER %s\n"
"\tRx RFC %d Ant num %d\n"
"\tSectors(rx:tx) my %d:%d peer %d:%d\n"
"}\n",
basic->cid,
WIL_EXTENDED_MCS_CHECK(basic->bf_mcs),
le32_to_cpu(basic->tx_tpt),
le32_to_cpu(basic->rx_goodput),
le32_to_cpu(basic->tx_goodput),
le32_to_cpu(basic->rx_bcast_frames),
basic->rssi, basic->sqi, basic->snr, per,
basic->selected_rfc, basic->rx_effective_ant_num,
basic->my_rx_sector, basic->my_tx_sector,
basic->other_rx_sector, basic->other_tx_sector);
}
static void wil_link_stats_print_global(struct wil6210_priv *wil,
struct seq_file *s,
struct wmi_link_stats_global *global)
{
seq_printf(s, "Frames(rx:tx) %d:%d\n"
"BA Frames(rx:tx) %d:%d\n"
"Beacons %d\n"
"Rx Errors (MIC:CRC) %d:%d\n"
"Tx Errors (no ack) %d\n",
le32_to_cpu(global->rx_frames),
le32_to_cpu(global->tx_frames),
le32_to_cpu(global->rx_ba_frames),
le32_to_cpu(global->tx_ba_frames),
le32_to_cpu(global->tx_beacons),
le32_to_cpu(global->rx_mic_errors),
le32_to_cpu(global->rx_crc_errors),
le32_to_cpu(global->tx_fail_no_ack));
}
static void wil_link_stats_debugfs_show_vif(struct wil6210_vif *vif,
struct seq_file *s)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wmi_link_stats_basic *stats;
int i;
if (!vif->fw_stats_ready) {
seq_puts(s, "no statistics\n");
return;
}
seq_printf(s, "TSF %lld\n", vif->fw_stats_tsf);
for (i = 0; i < wil->max_assoc_sta; i++) {
if (wil->sta[i].status == wil_sta_unused)
continue;
if (wil->sta[i].mid != vif->mid)
continue;
stats = &wil->sta[i].fw_stats_basic;
wil_link_stats_print_basic(vif, s, stats);
}
}
static int wil_link_stats_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
struct wil6210_vif *vif;
int i, rc;
rc = mutex_lock_interruptible(&wil->vif_mutex);
if (rc)
return rc;
	/* iterate over all MIDs and show per-cid statistics; the global
	 * statistics are exposed separately via "link_stats_global"
	 */
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
seq_printf(s, "MID %d ", i);
if (!vif) {
seq_puts(s, "unused\n");
continue;
}
wil_link_stats_debugfs_show_vif(vif, s);
}
mutex_unlock(&wil->vif_mutex);
return 0;
}
static int wil_link_stats_seq_open(struct inode *inode, struct file *file)
{
return single_open(file, wil_link_stats_debugfs_show, inode->i_private);
}
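/* "link_stats" write format: "<cid> <interval_ms>". A cid of -1 is
 * translated to 0xff, requesting basic statistics for all connected
 * stations on every VIF. Hypothetical usage: echo "-1 1000" > link_stats
 */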
static ssize_t wil_link_stats_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct wil6210_priv *wil = s->private;
int cid, interval, rc, i;
struct wil6210_vif *vif;
char *kbuf = kmalloc(len + 1, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
if (rc != len) {
kfree(kbuf);
return rc >= 0 ? -EIO : rc;
}
kbuf[len] = '\0';
/* specify cid (use -1 for all cids) and snapshot interval in ms */
rc = sscanf(kbuf, "%d %d", &cid, &interval);
kfree(kbuf);
if (rc < 0)
return rc;
if (rc < 2 || interval < 0)
return -EINVAL;
wil_info(wil, "request link statistics, cid %d interval %d\n",
cid, interval);
rc = mutex_lock_interruptible(&wil->vif_mutex);
if (rc)
return rc;
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (!vif)
continue;
rc = wmi_link_stats_cfg(vif, WMI_LINK_STATS_TYPE_BASIC,
(cid == -1 ? 0xff : cid), interval);
if (rc)
wil_err(wil, "link statistics failed for mid %d\n", i);
}
mutex_unlock(&wil->vif_mutex);
return len;
}
static const struct file_operations fops_link_stats = {
.open = wil_link_stats_seq_open,
.release = single_release,
.read = seq_read,
.write = wil_link_stats_write,
.llseek = seq_lseek,
};
static int
wil_link_stats_global_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
if (!wil->fw_stats_global.ready)
return 0;
seq_printf(s, "TSF %lld\n", wil->fw_stats_global.tsf);
wil_link_stats_print_global(wil, s, &wil->fw_stats_global.stats);
return 0;
}
static int
wil_link_stats_global_seq_open(struct inode *inode, struct file *file)
{
return single_open(file, wil_link_stats_global_debugfs_show,
inode->i_private);
}
static ssize_t
wil_link_stats_global_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct wil6210_priv *wil = s->private;
int interval, rc;
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
/* specify snapshot interval in ms */
rc = kstrtoint_from_user(buf, len, 0, &interval);
if (rc || interval < 0) {
wil_err(wil, "Invalid argument\n");
return -EINVAL;
}
wil_info(wil, "request global link stats, interval %d\n", interval);
rc = wmi_link_stats_cfg(vif, WMI_LINK_STATS_TYPE_GLOBAL, 0, interval);
if (rc)
wil_err(wil, "global link stats failed %d\n", rc);
return rc ? rc : len;
}
static const struct file_operations fops_link_stats_global = {
.open = wil_link_stats_global_seq_open,
.release = single_release,
.read = seq_read,
.write = wil_link_stats_global_write,
.llseek = seq_lseek,
};
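/* led_cfg: reading reports the module-level led_id parameter; writing a
 * non-zero value enables the LED via WMI, zero disables it.
 */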
static ssize_t wil_read_file_led_cfg(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
char buf[80];
int n;
n = snprintf(buf, sizeof(buf),
"led_id is set to %d, echo 1 to enable, 0 to disable\n",
led_id);
n = min_t(int, n, sizeof(buf));
return simple_read_from_buffer(user_buf, count, ppos,
buf, n);
}
static ssize_t wil_write_file_led_cfg(struct file *file,
const char __user *buf_,
size_t count, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
int val;
int rc;
rc = kstrtoint_from_user(buf_, count, 0, &val);
if (rc) {
wil_err(wil, "Invalid argument\n");
return rc;
}
wil_info(wil, "%s led %d\n", val ? "Enabling" : "Disabling", led_id);
rc = wmi_led_cfg(wil, val);
if (rc) {
wil_info(wil, "%s led %d failed\n",
val ? "Enabling" : "Disabling", led_id);
return rc;
}
return count;
}
static const struct file_operations fops_led_cfg = {
.read = wil_read_file_led_cfg,
.write = wil_write_file_led_cfg,
.open = simple_open,
};
/* led_blink_time, write:
 * "<blink_on_slow> <blink_off_slow> <blink_on_med> <blink_off_med> <blink_on_fast> <blink_off_fast>"
 */
static ssize_t wil_write_led_blink_time(struct file *file,
const char __user *buf,
size_t len, loff_t *ppos)
{
int rc;
char *kbuf = kmalloc(len + 1, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
if (rc != len) {
kfree(kbuf);
return rc >= 0 ? -EIO : rc;
}
kbuf[len] = '\0';
rc = sscanf(kbuf, "%d %d %d %d %d %d",
&led_blink_time[WIL_LED_TIME_SLOW].on_ms,
&led_blink_time[WIL_LED_TIME_SLOW].off_ms,
&led_blink_time[WIL_LED_TIME_MED].on_ms,
&led_blink_time[WIL_LED_TIME_MED].off_ms,
&led_blink_time[WIL_LED_TIME_FAST].on_ms,
&led_blink_time[WIL_LED_TIME_FAST].off_ms);
kfree(kbuf);
if (rc < 0)
return rc;
if (rc < 6)
return -EINVAL;
return len;
}
static ssize_t wil_read_led_blink_time(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
static char text[400];
snprintf(text, sizeof(text),
"To set led blink on/off time variables write:\n"
"<blink_on_slow> <blink_off_slow> <blink_on_med> "
"<blink_off_med> <blink_on_fast> <blink_off_fast>\n"
"The current values are:\n"
"%d %d %d %d %d %d\n",
led_blink_time[WIL_LED_TIME_SLOW].on_ms,
led_blink_time[WIL_LED_TIME_SLOW].off_ms,
led_blink_time[WIL_LED_TIME_MED].on_ms,
led_blink_time[WIL_LED_TIME_MED].off_ms,
led_blink_time[WIL_LED_TIME_FAST].on_ms,
led_blink_time[WIL_LED_TIME_FAST].off_ms);
	return simple_read_from_buffer(user_buf, count, ppos, text,
				       strlen(text));
}
static const struct file_operations fops_led_blink_time = {
.read = wil_read_led_blink_time,
.write = wil_write_led_blink_time,
.open = simple_open,
};
/*---------FW capabilities------------*/
static int fw_capabilities_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
seq_printf(s, "fw_capabilities : %*pb\n", WMI_FW_CAPABILITY_MAX,
wil->fw_capabilities);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(fw_capabilities);
/*---------FW version------------*/
static int fw_version_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
if (wil->fw_version[0])
seq_printf(s, "%s\n", wil->fw_version);
else
seq_puts(s, "N/A\n");
return 0;
}
DEFINE_SHOW_ATTRIBUTE(fw_version);
/*---------suspend_stats---------*/
static ssize_t wil_write_suspend_stats(struct file *file,
const char __user *buf,
size_t len, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
return len;
}
static ssize_t wil_read_suspend_stats(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
char *text;
int n, ret, text_size = 500;
text = kmalloc(text_size, GFP_KERNEL);
if (!text)
return -ENOMEM;
n = snprintf(text, text_size,
"Radio on suspend statistics:\n"
"successful suspends:%ld failed suspends:%ld\n"
"successful resumes:%ld failed resumes:%ld\n"
"rejected by device:%ld\n"
"Radio off suspend statistics:\n"
"successful suspends:%ld failed suspends:%ld\n"
"successful resumes:%ld failed resumes:%ld\n"
"General statistics:\n"
"rejected by host:%ld\n",
wil->suspend_stats.r_on.successful_suspends,
wil->suspend_stats.r_on.failed_suspends,
wil->suspend_stats.r_on.successful_resumes,
wil->suspend_stats.r_on.failed_resumes,
wil->suspend_stats.rejected_by_device,
wil->suspend_stats.r_off.successful_suspends,
wil->suspend_stats.r_off.failed_suspends,
wil->suspend_stats.r_off.successful_resumes,
wil->suspend_stats.r_off.failed_resumes,
wil->suspend_stats.rejected_by_host);
n = min_t(int, n, text_size);
ret = simple_read_from_buffer(user_buf, count, ppos, text, n);
kfree(text);
return ret;
}
static const struct file_operations fops_suspend_stats = {
.read = wil_read_suspend_stats,
.write = wil_write_suspend_stats,
.open = simple_open,
};
/*---------compressed_rx_status---------*/
static ssize_t wil_compressed_rx_status_write(struct file *file,
const char __user *buf,
size_t len, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct wil6210_priv *wil = s->private;
int compressed_rx_status;
int rc;
rc = kstrtoint_from_user(buf, len, 0, &compressed_rx_status);
if (rc) {
wil_err(wil, "Invalid argument\n");
return rc;
}
if (wil_has_active_ifaces(wil, true, false)) {
wil_err(wil, "cannot change edma config after iface is up\n");
return -EPERM;
}
wil_info(wil, "%sable compressed_rx_status\n",
compressed_rx_status ? "En" : "Dis");
wil->use_compressed_rx_status = compressed_rx_status;
return len;
}
static int
wil_compressed_rx_status_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
seq_printf(s, "%d\n", wil->use_compressed_rx_status);
return 0;
}
static int
wil_compressed_rx_status_seq_open(struct inode *inode, struct file *file)
{
return single_open(file, wil_compressed_rx_status_show,
inode->i_private);
}
static const struct file_operations fops_compressed_rx_status = {
.open = wil_compressed_rx_status_seq_open,
.release = single_release,
.read = seq_read,
.write = wil_compressed_rx_status_write,
.llseek = seq_lseek,
};
/*----------------*/
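/* Expose each named entry of fw_mapping as a read-only "blob_<name>"
 * debugfs file backed by the remapped device memory region.
 */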
static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
struct dentry *dbg)
{
int i;
char name[32];
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
struct wil_blob_wrapper *wil_blob = &wil->blobs[i];
struct debugfs_blob_wrapper *blob = &wil_blob->blob;
const struct fw_map *map = &fw_mapping[i];
if (!map->name)
continue;
wil_blob->wil = wil;
blob->data = (void * __force)wil->csr + HOSTADDR(map->host);
blob->size = map->to - map->from;
snprintf(name, sizeof(name), "blob_%s", map->name);
wil_debugfs_create_ioblob(name, 0444, dbg, wil_blob);
}
}
/* misc files */
static const struct {
const char *name;
umode_t mode;
const struct file_operations *fops;
} dbg_files[] = {
{"mbox", 0444, &mbox_fops},
{"rings", 0444, &ring_fops},
{"stations", 0444, &sta_fops},
{"mids", 0444, &mids_fops},
{"desc", 0444, &txdesc_fops},
{"bf", 0444, &bf_fops},
{"mem_val", 0644, &memread_fops},
{"rxon", 0244, &fops_rxon},
{"tx_mgmt", 0244, &fops_txmgmt},
{"wmi_send", 0244, &fops_wmi},
{"back", 0644, &fops_back},
{"pmccfg", 0644, &fops_pmccfg},
{"pmcdata", 0444, &fops_pmcdata},
{"pmcring", 0444, &fops_pmcring},
{"temp", 0444, &temp_fops},
{"link", 0444, &link_fops},
{"info", 0444, &info_fops},
{"recovery", 0644, &fops_recovery},
{"led_cfg", 0644, &fops_led_cfg},
{"led_blink_time", 0644, &fops_led_blink_time},
{"fw_capabilities", 0444, &fw_capabilities_fops},
{"fw_version", 0444, &fw_version_fops},
{"suspend_stats", 0644, &fops_suspend_stats},
{"compressed_rx_status", 0644, &fops_compressed_rx_status},
{"srings", 0444, &srings_fops},
{"status_msg", 0444, &status_msg_fops},
{"rx_buff_mgmt", 0444, &rx_buff_mgmt_fops},
{"tx_latency", 0644, &fops_tx_latency},
{"link_stats", 0644, &fops_link_stats},
{"link_stats_global", 0644, &fops_link_stats_global},
{"rbufcap", 0244, &fops_rbufcap},
};
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
struct dentry *dbg)
{
int i;
for (i = 0; i < ARRAY_SIZE(dbg_files); i++)
debugfs_create_file(dbg_files[i].name, dbg_files[i].mode, dbg,
wil, dbg_files[i].fops);
}
/* interrupt control blocks */
static const struct {
const char *name;
u32 icr_off;
} dbg_icr[] = {
{"USER_ICR", HOSTADDR(RGF_USER_USER_ICR)},
{"DMA_EP_TX_ICR", HOSTADDR(RGF_DMA_EP_TX_ICR)},
{"DMA_EP_RX_ICR", HOSTADDR(RGF_DMA_EP_RX_ICR)},
{"DMA_EP_MISC_ICR", HOSTADDR(RGF_DMA_EP_MISC_ICR)},
};
static void wil6210_debugfs_init_isr(struct wil6210_priv *wil,
struct dentry *dbg)
{
int i;
for (i = 0; i < ARRAY_SIZE(dbg_icr); i++)
wil6210_debugfs_create_ISR(wil, dbg_icr[i].name, dbg,
dbg_icr[i].icr_off);
}
#define WIL_FIELD(name, mode, type) { __stringify(name), mode, \
offsetof(struct wil6210_priv, name), type}
/* fields in struct wil6210_priv */
static const struct dbg_off dbg_wil_off[] = {
WIL_FIELD(status[0], 0644, doff_ulong),
WIL_FIELD(hw_version, 0444, doff_x32),
WIL_FIELD(recovery_count, 0444, doff_u32),
WIL_FIELD(discovery_mode, 0644, doff_u8),
WIL_FIELD(chip_revision, 0444, doff_u8),
WIL_FIELD(abft_len, 0644, doff_u8),
WIL_FIELD(wakeup_trigger, 0644, doff_u8),
WIL_FIELD(ring_idle_trsh, 0644, doff_u32),
WIL_FIELD(num_rx_status_rings, 0644, doff_u8),
WIL_FIELD(rx_status_ring_order, 0644, doff_u32),
WIL_FIELD(tx_status_ring_order, 0644, doff_u32),
WIL_FIELD(rx_buff_id_count, 0644, doff_u32),
WIL_FIELD(amsdu_en, 0644, doff_u8),
{},
};
static const struct dbg_off dbg_wil_regs[] = {
{"RGF_MAC_MTRL_COUNTER_0", 0444, HOSTADDR(RGF_MAC_MTRL_COUNTER_0),
doff_io32},
{"RGF_USER_USAGE_1", 0444, HOSTADDR(RGF_USER_USAGE_1), doff_io32},
{"RGF_USER_USAGE_2", 0444, HOSTADDR(RGF_USER_USAGE_2), doff_io32},
{},
};
/* static parameters */
static const struct dbg_off dbg_statics[] = {
{"desc_index", 0644, (ulong)&dbg_txdesc_index, doff_u32},
{"ring_index", 0644, (ulong)&dbg_ring_index, doff_u32},
{"mem_addr", 0644, (ulong)&mem_addr, doff_u32},
{"led_polarity", 0644, (ulong)&led_polarity, doff_u8},
{"status_index", 0644, (ulong)&dbg_status_msg_index, doff_u32},
{"sring_index", 0644, (ulong)&dbg_sring_index, doff_u32},
{"drop_if_ring_full", 0644, (ulong)&drop_if_ring_full, doff_u8},
{},
};
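/* Upper bound on the number of iomem accessor entries created by the
 * debugfs init helpers below; each "- 1" skips a table's empty
 * terminating entry, and the factor 4 likely accounts for the four
 * per-ICR files created for each interrupt control block.
 */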
static const int dbg_off_count = 4 * (ARRAY_SIZE(isr_off) - 1) +
ARRAY_SIZE(dbg_wil_regs) - 1 +
ARRAY_SIZE(pseudo_isr_off) - 1 +
ARRAY_SIZE(lgc_itr_cnt_off) - 1 +
ARRAY_SIZE(tx_itr_cnt_off) - 1 +
ARRAY_SIZE(rx_itr_cnt_off) - 1;
int wil6210_debugfs_init(struct wil6210_priv *wil)
{
struct dentry *dbg = wil->debug = debugfs_create_dir(WIL_NAME,
wil_to_wiphy(wil)->debugfsdir);
if (IS_ERR_OR_NULL(dbg))
return -ENODEV;
wil->dbg_data.data_arr = kcalloc(dbg_off_count,
sizeof(struct wil_debugfs_iomem_data),
GFP_KERNEL);
if (!wil->dbg_data.data_arr) {
debugfs_remove_recursive(dbg);
wil->debug = NULL;
return -ENOMEM;
}
wil->dbg_data.iomem_data_count = 0;
wil_pmc_init(wil);
wil6210_debugfs_init_files(wil, dbg);
wil6210_debugfs_init_isr(wil, dbg);
wil6210_debugfs_init_blobs(wil, dbg);
wil6210_debugfs_init_offset(wil, dbg, wil, dbg_wil_off);
wil6210_debugfs_init_offset(wil, dbg, (void * __force)wil->csr,
dbg_wil_regs);
wil6210_debugfs_init_offset(wil, dbg, NULL, dbg_statics);
wil6210_debugfs_create_pseudo_ISR(wil, dbg);
wil6210_debugfs_create_ITR_CNT(wil, dbg);
return 0;
}
void wil6210_debugfs_remove(struct wil6210_priv *wil)
{
int i;
debugfs_remove_recursive(wil->debug);
wil->debug = NULL;
kfree(wil->dbg_data.data_arr);
for (i = 0; i < wil->max_assoc_sta; i++)
kfree(wil->sta[i].tx_latency_bins);
/* free pmc memory without sending command to fw, as it will
* be reset on the way down anyway
*/
wil_pmc_free(wil, false);
}
|
linux-master
|
drivers/net/wireless/ath/wil6210/debugfs.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#include "wil6210.h"
#include "wmi.h"
#define P2P_WILDCARD_SSID "DIRECT-"
#define P2P_DMG_SOCIAL_CHANNEL 2
#define P2P_SEARCH_DURATION_MS 500
#define P2P_DEFAULT_BI 100
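/* Put the device into P2P listen state on the previously stored channel:
 * configure P2P parameters and the wildcard SSID, start WMI listen, and
 * arm the discovery timer for the requested duration.
 */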
static int wil_p2p_start_listen(struct wil6210_vif *vif)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wil_p2p_info *p2p = &vif->p2p;
u8 channel = p2p->listen_chan.hw_value;
int rc;
lockdep_assert_held(&wil->mutex);
rc = wmi_p2p_cfg(vif, channel, P2P_DEFAULT_BI);
if (rc) {
wil_err(wil, "wmi_p2p_cfg failed\n");
goto out;
}
rc = wmi_set_ssid(vif, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
if (rc) {
wil_err(wil, "wmi_set_ssid failed\n");
goto out_stop;
}
rc = wmi_start_listen(vif);
if (rc) {
wil_err(wil, "wmi_start_listen failed\n");
goto out_stop;
}
INIT_WORK(&p2p->discovery_expired_work, wil_p2p_listen_expired);
mod_timer(&p2p->discovery_timer,
jiffies + msecs_to_jiffies(p2p->listen_duration));
out_stop:
if (rc)
wmi_stop_discovery(vif);
out:
return rc;
}
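/* A P2P "social scan" is a single-channel scan on the DMG social
 * channel (2); callers use this test to route such requests to
 * wil_p2p_search() instead of a regular scan.
 */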
bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request)
{
return (request->n_channels == 1) &&
(request->channels[0]->hw_value == P2P_DMG_SOCIAL_CHANNEL);
}
int wil_p2p_search(struct wil6210_vif *vif,
struct cfg80211_scan_request *request)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
struct wil_p2p_info *p2p = &vif->p2p;
wil_dbg_misc(wil, "p2p_search: channel %d\n", P2P_DMG_SOCIAL_CHANNEL);
lockdep_assert_held(&wil->mutex);
if (p2p->discovery_started) {
wil_err(wil, "search failed. discovery already ongoing\n");
rc = -EBUSY;
goto out;
}
rc = wmi_p2p_cfg(vif, P2P_DMG_SOCIAL_CHANNEL, P2P_DEFAULT_BI);
if (rc) {
wil_err(wil, "wmi_p2p_cfg failed\n");
goto out;
}
rc = wmi_set_ssid(vif, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
if (rc) {
wil_err(wil, "wmi_set_ssid failed\n");
goto out_stop;
}
/* Set application IE to probe request and probe response */
rc = wmi_set_ie(vif, WMI_FRAME_PROBE_REQ,
request->ie_len, request->ie);
if (rc) {
wil_err(wil, "wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n");
goto out_stop;
}
	/* The supplicant doesn't provide Probe Response IEs; as a
	 * workaround, re-use the Probe Request IEs
	 */
rc = wmi_set_ie(vif, WMI_FRAME_PROBE_RESP,
request->ie_len, request->ie);
if (rc) {
wil_err(wil, "wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n");
goto out_stop;
}
rc = wmi_start_search(vif);
if (rc) {
wil_err(wil, "wmi_start_search failed\n");
goto out_stop;
}
p2p->discovery_started = 1;
INIT_WORK(&p2p->discovery_expired_work, wil_p2p_search_expired);
mod_timer(&p2p->discovery_timer,
jiffies + msecs_to_jiffies(P2P_SEARCH_DURATION_MS));
out_stop:
if (rc)
wmi_stop_discovery(vif);
out:
return rc;
}
int wil_p2p_listen(struct wil6210_priv *wil, struct wireless_dev *wdev,
unsigned int duration, struct ieee80211_channel *chan,
u64 *cookie)
{
struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
struct wil_p2p_info *p2p = &vif->p2p;
int rc;
if (!chan)
return -EINVAL;
wil_dbg_misc(wil, "p2p_listen: duration %d\n", duration);
mutex_lock(&wil->mutex);
if (p2p->discovery_started) {
wil_err(wil, "discovery already ongoing\n");
rc = -EBUSY;
goto out;
}
memcpy(&p2p->listen_chan, chan, sizeof(*chan));
*cookie = ++p2p->cookie;
p2p->listen_duration = duration;
mutex_lock(&wil->vif_mutex);
if (vif->scan_request) {
wil_dbg_misc(wil, "Delaying p2p listen until scan done\n");
p2p->pending_listen_wdev = wdev;
p2p->discovery_started = 1;
rc = 0;
mutex_unlock(&wil->vif_mutex);
goto out;
}
mutex_unlock(&wil->vif_mutex);
rc = wil_p2p_start_listen(vif);
if (rc)
goto out;
p2p->discovery_started = 1;
if (vif->mid == 0)
wil->radio_wdev = wdev;
cfg80211_ready_on_channel(wdev, *cookie, chan, duration,
GFP_KERNEL);
out:
mutex_unlock(&wil->mutex);
return rc;
}
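/* Stop a running or pending discovery. Returns non-zero if discovery was
 * actually active, so callers know whether to notify cfg80211.
 */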
u8 wil_p2p_stop_discovery(struct wil6210_vif *vif)
{
struct wil_p2p_info *p2p = &vif->p2p;
u8 started = p2p->discovery_started;
if (p2p->discovery_started) {
if (p2p->pending_listen_wdev) {
/* discovery not really started, only pending */
p2p->pending_listen_wdev = NULL;
} else {
del_timer_sync(&p2p->discovery_timer);
wmi_stop_discovery(vif);
}
p2p->discovery_started = 0;
}
return started;
}
int wil_p2p_cancel_listen(struct wil6210_vif *vif, u64 cookie)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wil_p2p_info *p2p = &vif->p2p;
u8 started;
mutex_lock(&wil->mutex);
if (cookie != p2p->cookie) {
wil_info(wil, "Cookie mismatch: 0x%016llx vs. 0x%016llx\n",
p2p->cookie, cookie);
mutex_unlock(&wil->mutex);
return -ENOENT;
}
started = wil_p2p_stop_discovery(vif);
mutex_unlock(&wil->mutex);
if (!started) {
wil_err(wil, "listen not started\n");
return -ENOENT;
}
mutex_lock(&wil->vif_mutex);
cfg80211_remain_on_channel_expired(vif_to_radio_wdev(wil, vif),
p2p->cookie,
&p2p->listen_chan,
GFP_KERNEL);
if (vif->mid == 0)
wil->radio_wdev = wil->main_ndev->ieee80211_ptr;
mutex_unlock(&wil->vif_mutex);
return 0;
}
void wil_p2p_listen_expired(struct work_struct *work)
{
struct wil_p2p_info *p2p = container_of(work,
struct wil_p2p_info, discovery_expired_work);
struct wil6210_vif *vif = container_of(p2p,
struct wil6210_vif, p2p);
struct wil6210_priv *wil = vif_to_wil(vif);
u8 started;
wil_dbg_misc(wil, "p2p_listen_expired\n");
mutex_lock(&wil->mutex);
started = wil_p2p_stop_discovery(vif);
mutex_unlock(&wil->mutex);
if (!started)
return;
mutex_lock(&wil->vif_mutex);
cfg80211_remain_on_channel_expired(vif_to_radio_wdev(wil, vif),
p2p->cookie,
&p2p->listen_chan,
GFP_KERNEL);
if (vif->mid == 0)
wil->radio_wdev = wil->main_ndev->ieee80211_ptr;
mutex_unlock(&wil->vif_mutex);
}
void wil_p2p_search_expired(struct work_struct *work)
{
struct wil_p2p_info *p2p = container_of(work,
struct wil_p2p_info, discovery_expired_work);
struct wil6210_vif *vif = container_of(p2p,
struct wil6210_vif, p2p);
struct wil6210_priv *wil = vif_to_wil(vif);
u8 started;
wil_dbg_misc(wil, "p2p_search_expired\n");
mutex_lock(&wil->mutex);
started = wil_p2p_stop_discovery(vif);
mutex_unlock(&wil->mutex);
if (started) {
struct cfg80211_scan_info info = {
.aborted = false,
};
mutex_lock(&wil->vif_mutex);
if (vif->scan_request) {
cfg80211_scan_done(vif->scan_request, &info);
vif->scan_request = NULL;
if (vif->mid == 0)
wil->radio_wdev =
wil->main_ndev->ieee80211_ptr;
}
mutex_unlock(&wil->vif_mutex);
}
}
void wil_p2p_delayed_listen_work(struct work_struct *work)
{
struct wil_p2p_info *p2p = container_of(work,
struct wil_p2p_info, delayed_listen_work);
struct wil6210_vif *vif = container_of(p2p,
struct wil6210_vif, p2p);
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
mutex_lock(&wil->mutex);
wil_dbg_misc(wil, "Checking delayed p2p listen\n");
if (!p2p->discovery_started || !p2p->pending_listen_wdev)
goto out;
mutex_lock(&wil->vif_mutex);
if (vif->scan_request) {
/* another scan started, wait again... */
mutex_unlock(&wil->vif_mutex);
goto out;
}
mutex_unlock(&wil->vif_mutex);
rc = wil_p2p_start_listen(vif);
mutex_lock(&wil->vif_mutex);
if (rc) {
cfg80211_remain_on_channel_expired(p2p->pending_listen_wdev,
p2p->cookie,
&p2p->listen_chan,
GFP_KERNEL);
if (vif->mid == 0)
wil->radio_wdev = wil->main_ndev->ieee80211_ptr;
} else {
cfg80211_ready_on_channel(p2p->pending_listen_wdev, p2p->cookie,
&p2p->listen_chan,
p2p->listen_duration, GFP_KERNEL);
if (vif->mid == 0)
wil->radio_wdev = p2p->pending_listen_wdev;
}
p2p->pending_listen_wdev = NULL;
mutex_unlock(&wil->vif_mutex);
out:
mutex_unlock(&wil->mutex);
}
void wil_p2p_stop_radio_operations(struct wil6210_priv *wil)
{
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
struct wil_p2p_info *p2p = &vif->p2p;
struct cfg80211_scan_info info = {
.aborted = true,
};
lockdep_assert_held(&wil->mutex);
lockdep_assert_held(&wil->vif_mutex);
if (wil->radio_wdev != wil->p2p_wdev)
goto out;
if (!p2p->discovery_started) {
/* Regular scan on the p2p device */
if (vif->scan_request &&
vif->scan_request->wdev == wil->p2p_wdev)
wil_abort_scan(vif, true);
goto out;
}
/* Search or listen on p2p device */
mutex_unlock(&wil->vif_mutex);
wil_p2p_stop_discovery(vif);
mutex_lock(&wil->vif_mutex);
if (vif->scan_request) {
/* search */
cfg80211_scan_done(vif->scan_request, &info);
vif->scan_request = NULL;
} else {
/* listen */
cfg80211_remain_on_channel_expired(wil->radio_wdev,
p2p->cookie,
&p2p->listen_chan,
GFP_KERNEL);
}
out:
wil->radio_wdev = wil->main_ndev->ieee80211_ptr;
}
|
linux-master
|
drivers/net/wireless/ath/wil6210/p2p.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
/* Algorithmic part of the firmware download.
 * To be included in the container file that provides the framework.
 */
#define wil_err_fw(wil, fmt, arg...) wil_err(wil, "ERR[ FW ]" fmt, ##arg)
#define wil_dbg_fw(wil, fmt, arg...) wil_dbg(wil, "DBG[ FW ]" fmt, ##arg)
#define wil_hex_dump_fw(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
print_hex_dump_debug("DBG[ FW ]" prefix_str, \
prefix_type, rowsize, \
groupsize, buf, len, ascii)
static bool wil_fw_addr_check(struct wil6210_priv *wil,
void __iomem **ioaddr, __le32 val,
u32 size, const char *msg)
{
*ioaddr = wmi_buffer_block(wil, val, size);
if (!(*ioaddr)) {
wil_err_fw(wil, "bad %s: 0x%08x\n", msg, le32_to_cpu(val));
return false;
}
return true;
}
/**
 * wil_fw_verify - verify firmware file validity
 * @wil: driver context
 * @data: firmware file image
 * @size: firmware file size, in bytes
 *
 * Perform various checks of the firmware file header; individual
 * records are not validated.
 *
 * Return: the header's data length on success, or a negative error code
 */
static int wil_fw_verify(struct wil6210_priv *wil, const u8 *data, size_t size)
{
const struct wil_fw_record_head *hdr = (const void *)data;
struct wil_fw_record_file_header fh;
const struct wil_fw_record_file_header *fh_;
u32 crc;
u32 dlen;
if (size % 4) {
wil_err_fw(wil, "image size not aligned: %zu\n", size);
return -EINVAL;
}
/* have enough data for the file header? */
if (size < sizeof(*hdr) + sizeof(fh)) {
wil_err_fw(wil, "file too short: %zu bytes\n", size);
return -EINVAL;
}
/* start with the file header? */
if (le16_to_cpu(hdr->type) != wil_fw_type_file_header) {
wil_err_fw(wil, "no file header\n");
return -EINVAL;
}
/* data_len */
fh_ = (struct wil_fw_record_file_header *)&hdr[1];
dlen = le32_to_cpu(fh_->data_len);
if (dlen % 4) {
wil_err_fw(wil, "data length not aligned: %lu\n", (ulong)dlen);
return -EINVAL;
}
if (size < dlen) {
wil_err_fw(wil, "file truncated at %zu/%lu\n",
size, (ulong)dlen);
return -EINVAL;
}
if (dlen < sizeof(*hdr) + sizeof(fh)) {
wil_err_fw(wil, "data length too short: %lu\n", (ulong)dlen);
return -EINVAL;
}
/* signature */
if (le32_to_cpu(fh_->signature) != WIL_FW_SIGNATURE) {
wil_err_fw(wil, "bad header signature: 0x%08x\n",
le32_to_cpu(fh_->signature));
return -EINVAL;
}
/* version */
if (le32_to_cpu(fh_->version) > WIL_FW_FMT_VERSION) {
wil_err_fw(wil, "unsupported header version: %d\n",
le32_to_cpu(fh_->version));
return -EINVAL;
}
	/* checksum. ~crc32(~0, data, size) when fh.crc set to 0 */
fh = *fh_;
fh.crc = 0;
crc = crc32_le(~0, (unsigned char const *)hdr, sizeof(*hdr));
crc = crc32_le(crc, (unsigned char const *)&fh, sizeof(fh));
crc = crc32_le(crc, (unsigned char const *)&fh_[1],
dlen - sizeof(*hdr) - sizeof(fh));
crc = ~crc;
if (crc != le32_to_cpu(fh_->crc)) {
wil_err_fw(wil, "checksum mismatch:"
" calculated for %lu bytes 0x%08x != 0x%08x\n",
(ulong)dlen, crc, le32_to_cpu(fh_->crc));
return -EINVAL;
}
return (int)dlen;
}
static int fw_ignore_section(struct wil6210_priv *wil, const void *data,
size_t size)
{
return 0;
}
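/* Comment-record sub-handler: copy the capabilities bitmap advertised by
 * the FW file into wil->fw_capabilities; a too-short record is tolerated
 * so the FW can still load.
 */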
static int
fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
size_t size)
{
const struct wil_fw_record_capabilities *rec = data;
size_t capa_size;
if (size < sizeof(*rec)) {
wil_err_fw(wil, "capabilities record too short: %zu\n", size);
/* let the FW load anyway */
return 0;
}
capa_size = size - offsetof(struct wil_fw_record_capabilities,
capabilities);
bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
memcpy(wil->fw_capabilities, rec->capabilities,
min_t(size_t, sizeof(wil->fw_capabilities), capa_size));
wil_hex_dump_fw("CAPA", DUMP_PREFIX_OFFSET, 16, 1,
rec->capabilities, capa_size, false);
return 0;
}
static int
fw_handle_brd_file(struct wil6210_priv *wil, const void *data,
size_t size)
{
const struct wil_fw_record_brd_file *rec = data;
u32 max_num_ent, i, ent_size;
if (size <= offsetof(struct wil_fw_record_brd_file, brd_info)) {
wil_err(wil, "board record too short, size %zu\n", size);
return -EINVAL;
}
ent_size = size - offsetof(struct wil_fw_record_brd_file, brd_info);
max_num_ent = ent_size / sizeof(struct brd_info);
if (!max_num_ent) {
wil_err(wil, "brd info entries are missing\n");
return -EINVAL;
}
wil->brd_info = kcalloc(max_num_ent, sizeof(struct wil_brd_info),
GFP_KERNEL);
if (!wil->brd_info)
return -ENOMEM;
for (i = 0; i < max_num_ent; i++) {
wil->brd_info[i].file_addr =
le32_to_cpu(rec->brd_info[i].base_addr);
wil->brd_info[i].file_max_size =
le32_to_cpu(rec->brd_info[i].max_size_bytes);
if (!wil->brd_info[i].file_addr)
break;
wil_dbg_fw(wil,
"brd info %d: file_addr 0x%x, file_max_size %d\n",
i, wil->brd_info[i].file_addr,
wil->brd_info[i].file_max_size);
}
wil->num_of_brd_entries = i;
if (wil->num_of_brd_entries == 0) {
kfree(wil->brd_info);
wil->brd_info = NULL;
wil_dbg_fw(wil,
"no valid brd info entries, using brd file addr\n");
} else {
wil_dbg_fw(wil, "num of brd info entries %d\n",
wil->num_of_brd_entries);
}
return 0;
}
static int
fw_handle_concurrency(struct wil6210_priv *wil, const void *data,
size_t size)
{
const struct wil_fw_record_concurrency *rec = data;
const struct wil_fw_concurrency_combo *combo;
const struct wil_fw_concurrency_limit *limit;
size_t remain, lsize;
int i, n_combos;
if (size < sizeof(*rec)) {
wil_err_fw(wil, "concurrency record too short: %zu\n", size);
/* continue, let the FW load anyway */
return 0;
}
n_combos = le16_to_cpu(rec->n_combos);
remain = size - offsetof(struct wil_fw_record_concurrency, combos);
combo = rec->combos;
for (i = 0; i < n_combos; i++) {
if (remain < sizeof(*combo))
goto out_short;
remain -= sizeof(*combo);
limit = combo->limits;
lsize = combo->n_limits * sizeof(*limit);
if (remain < lsize)
goto out_short;
remain -= lsize;
limit += combo->n_limits;
combo = (struct wil_fw_concurrency_combo *)limit;
}
return wil_cfg80211_iface_combinations_from_fw(wil, rec);
out_short:
wil_err_fw(wil, "concurrency record truncated\n");
return 0;
}
static int
fw_handle_comment(struct wil6210_priv *wil, const void *data,
size_t size)
{
const struct wil_fw_record_comment_hdr *hdr = data;
u32 magic;
int rc = 0;
if (size < sizeof(*hdr))
return 0;
magic = le32_to_cpu(hdr->magic);
switch (magic) {
case WIL_FW_CAPABILITIES_MAGIC:
wil_dbg_fw(wil, "magic is WIL_FW_CAPABILITIES_MAGIC\n");
rc = fw_handle_capabilities(wil, data, size);
break;
case WIL_BRD_FILE_MAGIC:
wil_dbg_fw(wil, "magic is WIL_BRD_FILE_MAGIC\n");
rc = fw_handle_brd_file(wil, data, size);
break;
case WIL_FW_CONCURRENCY_MAGIC:
wil_dbg_fw(wil, "magic is WIL_FW_CONCURRENCY_MAGIC\n");
rc = fw_handle_concurrency(wil, data, size);
break;
default:
wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1,
data, size, true);
}
return rc;
}
static int __fw_handle_data(struct wil6210_priv *wil, const void *data,
size_t size, __le32 addr)
{
const struct wil_fw_record_data *d = data;
void __iomem *dst;
size_t s = size - sizeof(*d);
if (size < sizeof(*d) + sizeof(u32)) {
wil_err_fw(wil, "data record too short: %zu\n", size);
return -EINVAL;
}
if (!wil_fw_addr_check(wil, &dst, addr, s, "address"))
return -EINVAL;
wil_dbg_fw(wil, "write [0x%08x] <== %zu bytes\n", le32_to_cpu(addr), s);
wil_memcpy_toio_32(dst, d->data, s);
wmb(); /* finish before processing next record */
return 0;
}
static int fw_handle_data(struct wil6210_priv *wil, const void *data,
size_t size)
{
const struct wil_fw_record_data *d = data;
return __fw_handle_data(wil, data, size, d->addr);
}
static int fw_handle_fill(struct wil6210_priv *wil, const void *data,
size_t size)
{
const struct wil_fw_record_fill *d = data;
void __iomem *dst;
u32 v;
size_t s = (size_t)le32_to_cpu(d->size);
if (size != sizeof(*d)) {
wil_err_fw(wil, "bad size for fill record: %zu\n", size);
return -EINVAL;
}
if (s < sizeof(u32)) {
wil_err_fw(wil, "fill size too short: %zu\n", s);
return -EINVAL;
}
if (s % sizeof(u32)) {
wil_err_fw(wil, "fill size not aligned: %zu\n", s);
return -EINVAL;
}
if (!wil_fw_addr_check(wil, &dst, d->addr, s, "address"))
return -EINVAL;
v = le32_to_cpu(d->value);
wil_dbg_fw(wil, "fill [0x%08x] <== 0x%08x, %zu bytes\n",
le32_to_cpu(d->addr), v, s);
wil_memset_toio_32(dst, v, s);
wmb(); /* finish before processing next record */
return 0;
}
static int fw_handle_file_header(struct wil6210_priv *wil, const void *data,
size_t size)
{
const struct wil_fw_record_file_header *d = data;
if (size != sizeof(*d)) {
wil_err_fw(wil, "file header length incorrect: %zu\n", size);
return -EINVAL;
}
wil_dbg_fw(wil, "new file, ver. %d, %i bytes\n",
d->version, d->data_len);
wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, d->comment,
sizeof(d->comment), true);
if (!memcmp(d->comment, WIL_FW_VERSION_PREFIX,
WIL_FW_VERSION_PREFIX_LEN))
memcpy(wil->fw_version,
d->comment + WIL_FW_VERSION_PREFIX_LEN,
min(sizeof(d->comment) - WIL_FW_VERSION_PREFIX_LEN,
sizeof(wil->fw_version) - 1));
return 0;
}
static int fw_handle_direct_write(struct wil6210_priv *wil, const void *data,
size_t size)
{
const struct wil_fw_record_direct_write *d = data;
const struct wil_fw_data_dwrite *block = d->data;
int n, i;
if (size % sizeof(*block)) {
wil_err_fw(wil, "record size not aligned on %zu: %zu\n",
sizeof(*block), size);
return -EINVAL;
}
n = size / sizeof(*block);
for (i = 0; i < n; i++) {
void __iomem *dst;
u32 m = le32_to_cpu(block[i].mask);
u32 v = le32_to_cpu(block[i].value);
u32 x, y;
if (!wil_fw_addr_check(wil, &dst, block[i].addr, 0, "address"))
return -EINVAL;
x = readl(dst);
y = (x & m) | (v & ~m);
wil_dbg_fw(wil, "write [0x%08x] <== 0x%08x "
"(old 0x%08x val 0x%08x mask 0x%08x)\n",
le32_to_cpu(block[i].addr), y, x, v, m);
writel(y, dst);
wmb(); /* finish before processing next record */
}
return 0;
}
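/* Issue one gateway transaction: latch the target address and command,
 * kick the RUN bit, then poll the BUSY bit in ~1 usec steps, giving up
 * after roughly 100 usec.
 */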
static int gw_write(struct wil6210_priv *wil, void __iomem *gwa_addr,
void __iomem *gwa_cmd, void __iomem *gwa_ctl, u32 gw_cmd,
u32 a)
{
	unsigned int delay = 0;
writel(a, gwa_addr);
writel(gw_cmd, gwa_cmd);
wmb(); /* finish before activate gw */
writel(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */
do {
udelay(1); /* typical time is few usec */
if (delay++ > 100) {
wil_err_fw(wil, "gw timeout\n");
return -EINVAL;
}
} while (readl(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */
return 0;
}
static int fw_handle_gateway_data(struct wil6210_priv *wil, const void *data,
size_t size)
{
const struct wil_fw_record_gateway_data *d = data;
const struct wil_fw_data_gw *block = d->data;
void __iomem *gwa_addr;
void __iomem *gwa_val;
void __iomem *gwa_cmd;
void __iomem *gwa_ctl;
u32 gw_cmd;
int n, i;
if (size < sizeof(*d) + sizeof(*block)) {
wil_err_fw(wil, "gateway record too short: %zu\n", size);
return -EINVAL;
}
if ((size - sizeof(*d)) % sizeof(*block)) {
wil_err_fw(wil, "gateway record data size"
" not aligned on %zu: %zu\n",
sizeof(*block), size - sizeof(*d));
return -EINVAL;
}
n = (size - sizeof(*d)) / sizeof(*block);
gw_cmd = le32_to_cpu(d->command);
wil_dbg_fw(wil, "gw write record [%3d] blocks, cmd 0x%08x\n",
n, gw_cmd);
if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0,
"gateway_addr_addr") ||
!wil_fw_addr_check(wil, &gwa_val, d->gateway_value_addr, 0,
"gateway_value_addr") ||
!wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0,
"gateway_cmd_addr") ||
!wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0,
"gateway_ctrl_address"))
return -EINVAL;
wil_dbg_fw(wil, "gw addresses: addr 0x%08x val 0x%08x"
" cmd 0x%08x ctl 0x%08x\n",
le32_to_cpu(d->gateway_addr_addr),
le32_to_cpu(d->gateway_value_addr),
le32_to_cpu(d->gateway_cmd_addr),
le32_to_cpu(d->gateway_ctrl_address));
for (i = 0; i < n; i++) {
int rc;
u32 a = le32_to_cpu(block[i].addr);
u32 v = le32_to_cpu(block[i].value);
wil_dbg_fw(wil, " gw write[%3d] [0x%08x] <== 0x%08x\n",
i, a, v);
writel(v, gwa_val);
rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
if (rc)
return rc;
}
return 0;
}
static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data,
size_t size)
{
const struct wil_fw_record_gateway_data4 *d = data;
const struct wil_fw_data_gw4 *block = d->data;
void __iomem *gwa_addr;
void __iomem *gwa_val[ARRAY_SIZE(block->value)];
void __iomem *gwa_cmd;
void __iomem *gwa_ctl;
u32 gw_cmd;
int n, i, k;
if (size < sizeof(*d) + sizeof(*block)) {
wil_err_fw(wil, "gateway4 record too short: %zu\n", size);
return -EINVAL;
}
if ((size - sizeof(*d)) % sizeof(*block)) {
wil_err_fw(wil, "gateway4 record data size"
" not aligned on %zu: %zu\n",
sizeof(*block), size - sizeof(*d));
return -EINVAL;
}
n = (size - sizeof(*d)) / sizeof(*block);
gw_cmd = le32_to_cpu(d->command);
wil_dbg_fw(wil, "gw4 write record [%3d] blocks, cmd 0x%08x\n",
n, gw_cmd);
if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0,
"gateway_addr_addr"))
return -EINVAL;
for (k = 0; k < ARRAY_SIZE(block->value); k++)
if (!wil_fw_addr_check(wil, &gwa_val[k],
d->gateway_value_addr[k],
0, "gateway_value_addr"))
return -EINVAL;
if (!wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0,
"gateway_cmd_addr") ||
!wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0,
"gateway_ctrl_address"))
return -EINVAL;
wil_dbg_fw(wil, "gw4 addresses: addr 0x%08x cmd 0x%08x ctl 0x%08x\n",
le32_to_cpu(d->gateway_addr_addr),
le32_to_cpu(d->gateway_cmd_addr),
le32_to_cpu(d->gateway_ctrl_address));
wil_hex_dump_fw("val addresses: ", DUMP_PREFIX_NONE, 16, 4,
d->gateway_value_addr, sizeof(d->gateway_value_addr),
false);
for (i = 0; i < n; i++) {
int rc;
u32 a = le32_to_cpu(block[i].addr);
u32 v[ARRAY_SIZE(block->value)];
for (k = 0; k < ARRAY_SIZE(block->value); k++)
v[k] = le32_to_cpu(block[i].value[k]);
wil_dbg_fw(wil, " gw4 write[%3d] [0x%08x] <==\n", i, a);
wil_hex_dump_fw(" val ", DUMP_PREFIX_NONE, 16, 4, v,
sizeof(v), false);
for (k = 0; k < ARRAY_SIZE(block->value); k++)
writel(v[k], gwa_val[k]);
rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
if (rc)
return rc;
}
return 0;
}
static const struct {
int type;
int (*load_handler)(struct wil6210_priv *wil, const void *data,
size_t size);
int (*parse_handler)(struct wil6210_priv *wil, const void *data,
size_t size);
} wil_fw_handlers[] = {
{wil_fw_type_comment, fw_handle_comment, fw_handle_comment},
{wil_fw_type_data, fw_handle_data, fw_ignore_section},
{wil_fw_type_fill, fw_handle_fill, fw_ignore_section},
/* wil_fw_type_action */
/* wil_fw_type_verify */
{wil_fw_type_file_header, fw_handle_file_header,
fw_handle_file_header},
{wil_fw_type_direct_write, fw_handle_direct_write, fw_ignore_section},
{wil_fw_type_gateway_data, fw_handle_gateway_data, fw_ignore_section},
{wil_fw_type_gateway_data4, fw_handle_gateway_data4,
fw_ignore_section},
};
static int wil_fw_handle_record(struct wil6210_priv *wil, int type,
const void *data, size_t size, bool load)
{
int i;
for (i = 0; i < ARRAY_SIZE(wil_fw_handlers); i++)
if (wil_fw_handlers[i].type == type)
return load ?
wil_fw_handlers[i].load_handler(
wil, data, size) :
wil_fw_handlers[i].parse_handler(
wil, data, size);
wil_err_fw(wil, "unknown record type: %d\n", type);
return -EINVAL;
}
/**
 * wil_fw_process - process one firmware image from a FW file
 * @wil: driver context
 * @data: firmware image
 * @size: firmware image size, in bytes
 * @load: if true, load the FW and uCode code and data to the
 *	corresponding device memory regions; otherwise only parse
 *	the image and look for capabilities
 *
 * Return: 0 on success, negative error code otherwise
 */
static int wil_fw_process(struct wil6210_priv *wil, const void *data,
size_t size, bool load)
{
int rc = 0;
const struct wil_fw_record_head *hdr;
size_t s, hdr_sz;
for (hdr = data;; hdr = (const void *)hdr + s, size -= s) {
if (size < sizeof(*hdr))
break;
hdr_sz = le32_to_cpu(hdr->size);
s = sizeof(*hdr) + hdr_sz;
if (s > size)
break;
if (hdr_sz % 4) {
wil_err_fw(wil, "unaligned record size: %zu\n",
hdr_sz);
return -EINVAL;
}
rc = wil_fw_handle_record(wil, le16_to_cpu(hdr->type),
&hdr[1], hdr_sz, load);
if (rc)
return rc;
}
if (size) {
wil_err_fw(wil, "unprocessed bytes: %zu\n", size);
if (size >= sizeof(*hdr)) {
wil_err_fw(wil, "Stop at offset %ld"
" record type %d [%zd bytes]\n",
(long)((const void *)hdr - data),
le16_to_cpu(hdr->type), hdr_sz);
}
return -EINVAL;
}
return rc;
}
/**
 * wil_request_firmware - request and process a firmware file
 * @wil: driver context
 * @name: firmware file name
 * @load: if true, load the firmware to the device; otherwise only
 *	parse the file and extract capabilities
 *
 * Return: 0 on success, negative error code otherwise
 */
int wil_request_firmware(struct wil6210_priv *wil, const char *name,
bool load)
{
int rc, rc1;
const struct firmware *fw;
size_t sz;
const void *d;
rc = request_firmware(&fw, name, wil_to_dev(wil));
if (rc) {
wil_err_fw(wil, "Failed to load firmware %s rc %d\n", name, rc);
return rc;
}
wil_dbg_fw(wil, "Loading <%s>, %zu bytes\n", name, fw->size);
/* re-initialize board info params */
wil->num_of_brd_entries = 0;
kfree(wil->brd_info);
wil->brd_info = NULL;
for (sz = fw->size, d = fw->data; sz; sz -= rc1, d += rc1) {
rc1 = wil_fw_verify(wil, d, sz);
if (rc1 < 0) {
rc = rc1;
goto out;
}
rc = wil_fw_process(wil, d, rc1, load);
if (rc < 0)
goto out;
}
out:
release_firmware(fw);
if (rc)
wil_err_fw(wil, "Loading <%s> failed, rc %d\n", name, rc);
return rc;
}
/**
 * wil_brd_process - process data records from a BRD file
 * @wil: driver context
 * @data: board file image
 * @size: board file data length, in bytes
 *
 * Return: 0 on success, negative error code otherwise
 */
static int wil_brd_process(struct wil6210_priv *wil, const void *data,
size_t size)
{
int rc = 0;
const struct wil_fw_record_head *hdr = data;
size_t s, hdr_sz = 0;
u16 type;
int i = 0;
/* Assuming the board file includes only one file header
* and one or several data records.
* Each record starts with wil_fw_record_head.
*/
if (size < sizeof(*hdr))
return -EINVAL;
s = sizeof(*hdr) + le32_to_cpu(hdr->size);
if (s > size)
return -EINVAL;
/* Skip the header record and handle the data records */
size -= s;
for (hdr = data + s;; hdr = (const void *)hdr + s, size -= s, i++) {
if (size < sizeof(*hdr))
break;
if (i >= wil->num_of_brd_entries) {
wil_err_fw(wil,
"Too many brd records: %d, num of expected entries %d\n",
i, wil->num_of_brd_entries);
break;
}
hdr_sz = le32_to_cpu(hdr->size);
s = sizeof(*hdr) + hdr_sz;
if (wil->brd_info[i].file_max_size &&
hdr_sz > wil->brd_info[i].file_max_size)
return -EINVAL;
if (sizeof(*hdr) + hdr_sz > size)
return -EINVAL;
if (hdr_sz % 4) {
wil_err_fw(wil, "unaligned record size: %zu\n",
hdr_sz);
return -EINVAL;
}
type = le16_to_cpu(hdr->type);
if (type != wil_fw_type_data) {
wil_err_fw(wil,
"invalid record type for board file: %d\n",
type);
return -EINVAL;
}
if (hdr_sz < sizeof(struct wil_fw_record_data)) {
wil_err_fw(wil, "data record too short: %zu\n", hdr_sz);
return -EINVAL;
}
wil_dbg_fw(wil,
"using info from fw file for record %d: addr[0x%08x], max size %d\n",
i, wil->brd_info[i].file_addr,
wil->brd_info[i].file_max_size);
rc = __fw_handle_data(wil, &hdr[1], hdr_sz,
cpu_to_le32(wil->brd_info[i].file_addr));
if (rc)
return rc;
}
if (size) {
wil_err_fw(wil, "unprocessed bytes: %zu\n", size);
if (size >= sizeof(*hdr)) {
wil_err_fw(wil,
"Stop at offset %ld record type %d [%zd bytes]\n",
(long)((const void *)hdr - data),
le16_to_cpu(hdr->type), hdr_sz);
}
return -EINVAL;
}
return 0;
}
/**
 * wil_request_board - request and process a board file
 * @wil: driver context
 * @name: board file name
 *
 * The board file address and maximum size are read from the FW file
 * during initialization. The board file shall include one file header
 * and one or more data records.
 *
 * Return: 0 on success, negative error code otherwise
 */
int wil_request_board(struct wil6210_priv *wil, const char *name)
{
int rc, dlen;
const struct firmware *brd;
rc = request_firmware(&brd, name, wil_to_dev(wil));
if (rc) {
wil_err_fw(wil, "Failed to load brd %s\n", name);
return rc;
}
wil_dbg_fw(wil, "Loading <%s>, %zu bytes\n", name, brd->size);
/* Verify the header */
dlen = wil_fw_verify(wil, brd->data, brd->size);
if (dlen < 0) {
rc = dlen;
goto out;
}
/* Process the data records */
rc = wil_brd_process(wil, brd->data, dlen);
out:
release_firmware(brd);
if (rc)
wil_err_fw(wil, "Loading <%s> failed, rc %d\n", name, rc);
return rc;
}
/**
 * wil_fw_verify_file_exists - check whether a firmware file exists
 * @wil: driver context
 * @name: firmware file name
 *
 * Return: true if the firmware file exists, false otherwise
 */
bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name)
{
const struct firmware *fw;
int rc;
rc = request_firmware(&fw, name, wil_to_dev(wil));
if (!rc)
release_firmware(fw);
else
wil_dbg_fw(wil, "<%s> not available: %d\n", name, rc);
return !rc;
}
|
linux-master
|
drivers/net/wireless/ath/wil6210/fw_inc.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2015,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include "wil6210.h"
#include "fw.h"
MODULE_FIRMWARE(WIL_FW_NAME_DEFAULT);
MODULE_FIRMWARE(WIL_FW_NAME_SPARROW_PLUS);
MODULE_FIRMWARE(WIL_BOARD_FILE_NAME);
MODULE_FIRMWARE(WIL_FW_NAME_TALYN);
MODULE_FIRMWARE(WIL_BRD_NAME_TALYN);
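/* memset() analogue for mapped device memory; data is written in whole
 * 32-bit words, so @count is expected to be a multiple of 4.
 */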
static
void wil_memset_toio_32(volatile void __iomem *dst, u32 val,
size_t count)
{
volatile u32 __iomem *d = dst;
for (count += 4; count > 4; count -= 4)
__raw_writel(val, d++);
}
#include "fw_inc.c"
|
linux-master
|
drivers/net/wireless/ath/wil6210/fw.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/rtnetlink.h>
#include <net/cfg80211.h>
#include "wil6210.h"
static int
wil_ethtoolops_get_coalesce(struct net_device *ndev,
struct ethtool_coalesce *cp,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
u32 tx_itr_en, tx_itr_val = 0;
u32 rx_itr_en, rx_itr_val = 0;
int ret;
mutex_lock(&wil->mutex);
wil_dbg_misc(wil, "ethtoolops_get_coalesce\n");
ret = wil_pm_runtime_get(wil);
if (ret < 0)
goto out;
tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
tx_itr_val = wil_r(wil, RGF_DMA_ITR_TX_CNT_TRSH);
rx_itr_en = wil_r(wil, RGF_DMA_ITR_RX_CNT_CTL);
if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
rx_itr_val = wil_r(wil, RGF_DMA_ITR_RX_CNT_TRSH);
wil_pm_runtime_put(wil);
cp->tx_coalesce_usecs = tx_itr_val;
cp->rx_coalesce_usecs = rx_itr_val;
ret = 0;
out:
mutex_unlock(&wil->mutex);
return ret;
}
static int
wil_ethtoolops_set_coalesce(struct net_device *ndev,
struct ethtool_coalesce *cp,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
struct wireless_dev *wdev = ndev->ieee80211_ptr;
int ret;
mutex_lock(&wil->mutex);
wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n",
cp->rx_coalesce_usecs, cp->tx_coalesce_usecs);
if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
wil_dbg_misc(wil, "No IRQ coalescing in monitor mode\n");
ret = -EINVAL;
goto out;
}
	/* only @rx_coalesce_usecs and @tx_coalesce_usecs are supported,
	 * other parameters are ignored
	 */
if (cp->rx_coalesce_usecs > WIL6210_ITR_TRSH_MAX ||
cp->tx_coalesce_usecs > WIL6210_ITR_TRSH_MAX)
goto out_bad;
wil->tx_max_burst_duration = cp->tx_coalesce_usecs;
wil->rx_max_burst_duration = cp->rx_coalesce_usecs;
ret = wil_pm_runtime_get(wil);
if (ret < 0)
goto out;
wil->txrx_ops.configure_interrupt_moderation(wil);
wil_pm_runtime_put(wil);
ret = 0;
out:
mutex_unlock(&wil->mutex);
return ret;
out_bad:
wil_dbg_misc(wil, "Unsupported coalescing params. Raw command:\n");
print_hex_dump_debug("DBG[MISC] coal ", DUMP_PREFIX_OFFSET, 16, 4,
cp, sizeof(*cp), false);
mutex_unlock(&wil->mutex);
return -EINVAL;
}
static const struct ethtool_ops wil_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = cfg80211_get_drvinfo,
.get_coalesce = wil_ethtoolops_get_coalesce,
.set_coalesce = wil_ethtoolops_set_coalesce,
};
void wil_set_ethtoolops(struct net_device *ndev)
{
ndev->ethtool_ops = &wil_ethtool_ops;
}
|
linux-master
|
drivers/net/wireless/ath/wil6210/ethtool.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2013,2016 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#include "wil6210.h"
#include "trace.h"
void __wil_err(struct wil6210_priv *wil, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
netdev_err(wil->main_ndev, "%pV", &vaf);
trace_wil6210_log_err(&vaf);
va_end(args);
}
void __wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
if (!net_ratelimit())
return;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
netdev_err(wil->main_ndev, "%pV", &vaf);
trace_wil6210_log_err(&vaf);
va_end(args);
}
void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
if (!net_ratelimit())
return;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
netdev_dbg(wil->main_ndev, "%pV", &vaf);
trace_wil6210_log_dbg(&vaf);
va_end(args);
}
void __wil_info(struct wil6210_priv *wil, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
netdev_info(wil->main_ndev, "%pV", &vaf);
trace_wil6210_log_info(&vaf);
va_end(args);
}
void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
trace_wil6210_log_dbg(&vaf);
va_end(args);
}
|
linux-master
|
drivers/net/wireless/ath/wil6210/debug.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include "wil6210.h"
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#define WIL6210_AUTOSUSPEND_DELAY_MS (1000)
static void wil_pm_wake_connected_net_queues(struct wil6210_priv *wil)
{
int i;
mutex_lock(&wil->vif_mutex);
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
struct wil6210_vif *vif = wil->vifs[i];
if (vif && test_bit(wil_vif_fwconnected, vif->status))
wil_update_net_queues_bh(wil, vif, NULL, false);
}
mutex_unlock(&wil->vif_mutex);
}
static void wil_pm_stop_all_net_queues(struct wil6210_priv *wil)
{
int i;
mutex_lock(&wil->vif_mutex);
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
struct wil6210_vif *vif = wil->vifs[i];
if (vif)
wil_update_net_queues_bh(wil, vif, NULL, true);
}
mutex_unlock(&wil->vif_mutex);
}
static bool
wil_can_suspend_vif(struct wil6210_priv *wil, struct wil6210_vif *vif,
bool is_runtime)
{
struct wireless_dev *wdev = vif_to_wdev(vif);
switch (wdev->iftype) {
case NL80211_IFTYPE_MONITOR:
wil_dbg_pm(wil, "Sniffer\n");
return false;
/* for STA-like interface, don't runtime suspend */
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
if (test_bit(wil_vif_fwconnecting, vif->status)) {
wil_dbg_pm(wil, "Delay suspend when connecting\n");
return false;
}
if (is_runtime) {
wil_dbg_pm(wil, "STA-like interface\n");
return false;
}
break;
/* AP-like interface - can't suspend */
default:
wil_dbg_pm(wil, "AP-like interface\n");
return false;
}
return true;
}
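/* Global suspend gate: deny suspend in wmi_only/debug_fw modes, while
 * resetting or recovering, and whenever any active VIF cannot suspend;
 * a device with no active interfaces can always suspend.
 */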
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
{
int rc = 0, i;
bool wmi_only = test_bit(WMI_FW_CAPABILITY_WMI_ONLY,
wil->fw_capabilities);
bool active_ifaces;
wil_dbg_pm(wil, "can_suspend: %s\n", is_runtime ? "runtime" : "system");
if (wmi_only || debug_fw) {
wil_dbg_pm(wil, "Deny any suspend - %s mode\n",
wmi_only ? "wmi_only" : "debug_fw");
rc = -EBUSY;
goto out;
}
if (is_runtime && !wil->platform_ops.suspend) {
rc = -EBUSY;
goto out;
}
mutex_lock(&wil->vif_mutex);
active_ifaces = wil_has_active_ifaces(wil, true, false);
mutex_unlock(&wil->vif_mutex);
if (!active_ifaces) {
/* can always sleep when down */
wil_dbg_pm(wil, "Interface is down\n");
goto out;
}
if (test_bit(wil_status_resetting, wil->status)) {
wil_dbg_pm(wil, "Delay suspend when resetting\n");
rc = -EBUSY;
goto out;
}
if (wil->recovery_state != fw_recovery_idle) {
wil_dbg_pm(wil, "Delay suspend during recovery\n");
rc = -EBUSY;
goto out;
}
/* interface is running */
mutex_lock(&wil->vif_mutex);
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
struct wil6210_vif *vif = wil->vifs[i];
if (!vif)
continue;
if (!wil_can_suspend_vif(wil, vif, is_runtime)) {
rc = -EBUSY;
mutex_unlock(&wil->vif_mutex);
goto out;
}
}
mutex_unlock(&wil->vif_mutex);
out:
wil_dbg_pm(wil, "can_suspend: %s => %s (%d)\n",
is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
if (rc)
wil->suspend_stats.rejected_by_host++;
return rc;
}
static int wil_resume_keep_radio_on(struct wil6210_priv *wil)
{
int rc = 0;
/* wil_status_resuming will be cleared when getting
* WMI_TRAFFIC_RESUME_EVENTID
*/
set_bit(wil_status_resuming, wil->status);
clear_bit(wil_status_suspended, wil->status);
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
wil_unmask_irq(wil);
wil6210_bus_request(wil, wil->bus_request_kbps_pre_suspend);
/* Send WMI resume request to the device */
rc = wmi_resume(wil);
if (rc) {
wil_err(wil, "device failed to resume (%d)\n", rc);
if (no_fw_recovery)
goto out;
rc = wil_down(wil);
if (rc) {
wil_err(wil, "wil_down failed (%d)\n", rc);
goto out;
}
rc = wil_up(wil);
if (rc) {
wil_err(wil, "wil_up failed (%d)\n", rc);
goto out;
}
}
/* Wake all queues */
wil_pm_wake_connected_net_queues(wil);
out:
if (rc)
set_bit(wil_status_suspended, wil->status);
return rc;
}
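/* Keep-radio-on suspend: block new TX/WMI activity, verify TX, RX and WMI
 * are idle, negotiate the suspend with the FW, then mask IRQs and suspend
 * the platform link. Any failure after the FW handshake resumes the FW
 * and rejects the suspend with -EBUSY.
 */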
static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
{
int rc = 0;
unsigned long data_comp_to;
wil_dbg_pm(wil, "suspend keep radio on\n");
/* Prevent handling of new tx and wmi commands */
rc = down_write_trylock(&wil->mem_lock);
if (!rc) {
wil_err(wil,
"device is busy. down_write_trylock failed, returned (0x%x)\n",
rc);
wil->suspend_stats.rejected_by_host++;
return -EBUSY;
}
set_bit(wil_status_suspending, wil->status);
up_write(&wil->mem_lock);
wil_pm_stop_all_net_queues(wil);
if (!wil_is_tx_idle(wil)) {
wil_dbg_pm(wil, "Pending TX data, reject suspend\n");
wil->suspend_stats.rejected_by_host++;
goto reject_suspend;
}
if (!wil->txrx_ops.is_rx_idle(wil)) {
wil_dbg_pm(wil, "Pending RX data, reject suspend\n");
wil->suspend_stats.rejected_by_host++;
goto reject_suspend;
}
if (!wil_is_wmi_idle(wil)) {
wil_dbg_pm(wil, "Pending WMI events, reject suspend\n");
wil->suspend_stats.rejected_by_host++;
goto reject_suspend;
}
/* Send WMI suspend request to the device */
rc = wmi_suspend(wil);
if (rc) {
wil_dbg_pm(wil, "wmi_suspend failed, reject suspend (%d)\n",
rc);
goto reject_suspend;
}
/* Wait for completion of the pending RX packets */
data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS);
if (test_bit(wil_status_napi_en, wil->status)) {
while (!wil->txrx_ops.is_rx_idle(wil)) {
if (time_after(jiffies, data_comp_to)) {
if (wil->txrx_ops.is_rx_idle(wil))
break;
wil_err(wil,
"TO waiting for idle RX, suspend failed\n");
wil->suspend_stats.r_on.failed_suspends++;
goto resume_after_fail;
}
wil_dbg_ratelimited(wil, "rx vring is not empty -> NAPI\n");
napi_synchronize(&wil->napi_rx);
msleep(20);
}
}
/* In case of pending WMI events, reject the suspend
* and resume the device.
* This can happen if the device sent the WMI events before
* approving the suspend.
*/
if (!wil_is_wmi_idle(wil)) {
wil_err(wil, "suspend failed due to pending WMI events\n");
wil->suspend_stats.r_on.failed_suspends++;
goto resume_after_fail;
}
wil_mask_irq(wil);
/* Disable device reset on PERST */
wil_s(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
if (wil->platform_ops.suspend) {
rc = wil->platform_ops.suspend(wil->platform_handle, true);
if (rc) {
wil_err(wil, "platform device failed to suspend (%d)\n",
rc);
wil->suspend_stats.r_on.failed_suspends++;
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
wil_unmask_irq(wil);
goto resume_after_fail;
}
}
/* Save the current bus request to return to the same in resume */
wil->bus_request_kbps_pre_suspend = wil->bus_request_kbps;
wil6210_bus_request(wil, 0);
set_bit(wil_status_suspended, wil->status);
clear_bit(wil_status_suspending, wil->status);
return rc;
resume_after_fail:
set_bit(wil_status_resuming, wil->status);
clear_bit(wil_status_suspending, wil->status);
rc = wmi_resume(wil);
/* if resume succeeded, reject the suspend */
if (!rc) {
rc = -EBUSY;
wil_pm_wake_connected_net_queues(wil);
}
return rc;
reject_suspend:
clear_bit(wil_status_suspending, wil->status);
wil_pm_wake_connected_net_queues(wil);
return -EBUSY;
}
static int wil_suspend_radio_off(struct wil6210_priv *wil)
{
int rc = 0;
bool active_ifaces;
wil_dbg_pm(wil, "suspend radio off\n");
rc = down_write_trylock(&wil->mem_lock);
if (!rc) {
wil_err(wil,
"device is busy. down_write_trylock failed, returned (0x%x)\n",
rc);
wil->suspend_stats.rejected_by_host++;
return -EBUSY;
}
set_bit(wil_status_suspending, wil->status);
up_write(&wil->mem_lock);
/* if netif up, hardware is alive, shut it down */
mutex_lock(&wil->vif_mutex);
active_ifaces = wil_has_active_ifaces(wil, true, false);
mutex_unlock(&wil->vif_mutex);
if (active_ifaces) {
rc = wil_down(wil);
if (rc) {
wil_err(wil, "wil_down : %d\n", rc);
wil->suspend_stats.r_off.failed_suspends++;
goto out;
}
}
/* Disable PCIe IRQ to prevent sporadic IRQs when PCIe is suspending */
wil_dbg_pm(wil, "Disabling PCIe IRQ before suspending\n");
wil_disable_irq(wil);
if (wil->platform_ops.suspend) {
rc = wil->platform_ops.suspend(wil->platform_handle, false);
if (rc) {
wil_enable_irq(wil);
wil->suspend_stats.r_off.failed_suspends++;
goto out;
}
}
set_bit(wil_status_suspended, wil->status);
out:
clear_bit(wil_status_suspending, wil->status);
wil_dbg_pm(wil, "suspend radio off: %d\n", rc);
return rc;
}
static int wil_resume_radio_off(struct wil6210_priv *wil)
{
int rc = 0;
bool active_ifaces;
wil_dbg_pm(wil, "Enabling PCIe IRQ\n");
wil_enable_irq(wil);
/* If any netif is up, bring the hardware up.
* During open(), IFF_UP is set after the actual device method
* invocation; this prevents a recursive call to wil_up().
* wil_status_suspended will be cleared in wil_reset.
*/
mutex_lock(&wil->vif_mutex);
active_ifaces = wil_has_active_ifaces(wil, true, false);
mutex_unlock(&wil->vif_mutex);
if (active_ifaces)
rc = wil_up(wil);
else
clear_bit(wil_status_suspended, wil->status);
return rc;
}
int wil_suspend(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on)
{
int rc = 0;
wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
if (test_bit(wil_status_suspended, wil->status)) {
wil_dbg_pm(wil, "trying to suspend while suspended\n");
return 0;
}
if (!keep_radio_on)
rc = wil_suspend_radio_off(wil);
else
rc = wil_suspend_keep_radio_on(wil);
wil_dbg_pm(wil, "suspend: %s => %d\n",
is_runtime ? "runtime" : "system", rc);
return rc;
}
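/* Illustrative sketch, not part of the driver: a bus-level PM handler would
* typically pick the suspend flavor from the keep_radio_on_during_sleep
* capability resolved at init time. example_pm_suspend() is a hypothetical
* wrapper, shown only to illustrate the calling convention.
*/
static int example_pm_suspend(struct wil6210_priv *wil, bool is_runtime)
{
return wil_suspend(wil, is_runtime, wil->keep_radio_on_during_sleep);
}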
int wil_resume(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on)
{
int rc = 0;
wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
if (wil->platform_ops.resume) {
rc = wil->platform_ops.resume(wil->platform_handle,
keep_radio_on);
if (rc) {
wil_err(wil, "platform_ops.resume : %d\n", rc);
goto out;
}
}
if (keep_radio_on)
rc = wil_resume_keep_radio_on(wil);
else
rc = wil_resume_radio_off(wil);
out:
wil_dbg_pm(wil, "resume: %s => %d\n", is_runtime ? "runtime" : "system",
rc);
return rc;
}
void wil_pm_runtime_allow(struct wil6210_priv *wil)
{
struct device *dev = wil_to_dev(wil);
pm_runtime_put_noidle(dev);
pm_runtime_set_autosuspend_delay(dev, WIL6210_AUTOSUSPEND_DELAY_MS);
pm_runtime_use_autosuspend(dev);
pm_runtime_allow(dev);
}
void wil_pm_runtime_forbid(struct wil6210_priv *wil)
{
struct device *dev = wil_to_dev(wil);
pm_runtime_forbid(dev);
pm_runtime_get_noresume(dev);
}
int wil_pm_runtime_get(struct wil6210_priv *wil)
{
int rc;
struct device *dev = wil_to_dev(wil);
rc = pm_runtime_resume_and_get(dev);
if (rc < 0) {
wil_err(wil, "pm_runtime_resume_and_get() failed, rc = %d\n", rc);
return rc;
}
return 0;
}
void wil_pm_runtime_put(struct wil6210_priv *wil)
{
struct device *dev = wil_to_dev(wil);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
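/* Illustrative sketch, not part of the driver: the runtime-PM helpers above
* are used in balanced pairs. wil_pm_runtime_get() resumes the device
* synchronously; wil_pm_runtime_put() marks it busy and arms the
* autosuspend timer. example_powered_access() is hypothetical.
*/
static int example_powered_access(struct wil6210_priv *wil)
{
int rc = wil_pm_runtime_get(wil);
if (rc < 0)
return rc;
/* ... access the device here while it is guaranteed to be powered ... */
wil_pm_runtime_put(wil);
return 0;
}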
|
linux-master
|
drivers/net/wireless/ath/wil6210/pm.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/moduleparam.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include "wil6210.h"
#include "txrx.h"
#include "txrx_edma.h"
#include "wmi.h"
#include "boot_loader.h"
#define WAIT_FOR_HALP_VOTE_MS 100
#define WAIT_FOR_SCAN_ABORT_MS 1000
#define WIL_DEFAULT_NUM_RX_STATUS_RINGS 1
#define WIL_BOARD_FILE_MAX_NAMELEN 128
bool debug_fw; /* = false; */
module_param(debug_fw, bool, 0444);
MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
static u8 oob_mode;
module_param(oob_mode, byte, 0444);
MODULE_PARM_DESC(oob_mode,
" enable out of the box (OOB) mode in FW, for diagnostics and certification");
bool no_fw_recovery;
module_param(no_fw_recovery, bool, 0644);
MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
/* If not set via modparam, this will be set to the default value of 1/8 of
* the Rx ring size during the init flow.
*/
unsigned short rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_INIT;
module_param(rx_ring_overflow_thrsh, ushort, 0444);
MODULE_PARM_DESC(rx_ring_overflow_thrsh,
" RX ring overflow threshold in descriptors.");
/* We allow allocation of buffers larger than one page to support large
* packets. This is suboptimal performance-wise when the MTU exceeds the
* page size.
*/
unsigned int mtu_max = TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
static int mtu_max_set(const char *val, const struct kernel_param *kp)
{
int ret;
/* Sets mtu_max directly. No need to restore it on an illegal value,
* since we assume the failure will abort insmod.
*/
ret = param_set_uint(val, kp);
if (ret)
return ret;
if (mtu_max < 68 || mtu_max > WIL_MAX_ETH_MTU)
ret = -EINVAL;
return ret;
}
static const struct kernel_param_ops mtu_max_ops = {
.set = mtu_max_set,
.get = param_get_uint,
};
module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, 0444);
MODULE_PARM_DESC(mtu_max, " Max MTU value.");
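/* Example: loading with "modprobe wil6210 mtu_max=3000" succeeds only if
* 3000 lies in [68, WIL_MAX_ETH_MTU]; an out-of-range value makes
* mtu_max_set() return -EINVAL, so module load fails instead of the value
* being silently clamped.
*/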
static uint rx_ring_order;
static uint tx_ring_order = WIL_TX_RING_SIZE_ORDER_DEFAULT;
static uint bcast_ring_order = WIL_BCAST_RING_SIZE_ORDER_DEFAULT;
static int ring_order_set(const char *val, const struct kernel_param *kp)
{
int ret;
uint x;
ret = kstrtouint(val, 0, &x);
if (ret)
return ret;
if ((x < WIL_RING_SIZE_ORDER_MIN) || (x > WIL_RING_SIZE_ORDER_MAX))
return -EINVAL;
*((uint *)kp->arg) = x;
return 0;
}
static const struct kernel_param_ops ring_order_ops = {
.set = ring_order_set,
.get = param_get_uint,
};
module_param_cb(rx_ring_order, &ring_order_ops, &rx_ring_order, 0444);
MODULE_PARM_DESC(rx_ring_order, " Rx ring order; size = 1 << order");
module_param_cb(tx_ring_order, &ring_order_ops, &tx_ring_order, 0444);
MODULE_PARM_DESC(tx_ring_order, " Tx ring order; size = 1 << order");
module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, 0444);
MODULE_PARM_DESC(bcast_ring_order, " Bcast ring order; size = 1 << order");
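/* Example: the ring orders encode sizes as powers of two, so
* tx_ring_order == 10 yields a 1024-descriptor Tx ring (1 << 10);
* ring_order_set() rejects any order outside
* [WIL_RING_SIZE_ORDER_MIN, WIL_RING_SIZE_ORDER_MAX].
*/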
enum {
WIL_BOOT_ERR,
WIL_BOOT_VANILLA,
WIL_BOOT_PRODUCTION,
WIL_BOOT_DEVELOPMENT,
};
enum {
WIL_SIG_STATUS_VANILLA = 0x0,
WIL_SIG_STATUS_DEVELOPMENT = 0x1,
WIL_SIG_STATUS_PRODUCTION = 0x2,
WIL_SIG_STATUS_CORRUPTED_PRODUCTION = 0x3,
};
#define RST_DELAY (20) /* msec, for loop in @wil_wait_device_ready */
#define RST_COUNT (1 + 1000/RST_DELAY) /* round up to be above 1 sec total */
#define PMU_READY_DELAY_MS (4) /* ms, for sleep in @wil_wait_device_ready */
#define OTP_HW_DELAY (200) /* usec, loop in @wil_wait_device_ready_talyn_mb */
/* round up to be above 2 ms total */
#define OTP_HW_COUNT (1 + 2000 / OTP_HW_DELAY)
/*
* Due to a hardware issue,
* one has to read/write to/from the NIC in 32-bit chunks;
* regular memcpy_fromio and siblings will
* not work on 64-bit platforms - they use 64-bit transactions.
*
* Force 32-bit transactions to enable the NIC on 64-bit platforms.
*
* To avoid byte swaps on a big-endian host, __raw_{read|write}l
* should be used - {read|write}l would swap bytes to present the
* little-endian PCI value in host endianness.
*/
void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
size_t count)
{
u32 *d = dst;
const volatile u32 __iomem *s = src;
for (; count >= 4; count -= 4)
*d++ = __raw_readl(s++);
if (unlikely(count)) {
/* count can be 1..3 */
u32 tmp = __raw_readl(s);
memcpy(d, &tmp, count);
}
}
void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
size_t count)
{
volatile u32 __iomem *d = dst;
const u32 *s = src;
for (; count >= 4; count -= 4)
__raw_writel(*s++, d++);
if (unlikely(count)) {
/* count can be 1..3 */
u32 tmp = 0;
memcpy(&tmp, s, count);
__raw_writel(tmp, d);
}
}
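/* Example: wil_memcpy_fromio_32(dst, src, 10) issues two full 32-bit reads,
* then one more __raw_readl() of which only 2 bytes are kept, so the device
* only ever sees aligned 32-bit transactions.
*/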
/* Device memory access is prohibited during reset or suspend.
* wil_mem_access_lock protects against accessing device memory in
* these cases.
*/
int wil_mem_access_lock(struct wil6210_priv *wil)
{
if (!down_read_trylock(&wil->mem_lock))
return -EBUSY;
if (test_bit(wil_status_suspending, wil->status) ||
test_bit(wil_status_suspended, wil->status)) {
up_read(&wil->mem_lock);
return -EBUSY;
}
return 0;
}
void wil_mem_access_unlock(struct wil6210_priv *wil)
{
up_read(&wil->mem_lock);
}
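/* Illustrative sketch, not part of the driver: callers bracket device memory
* access with the helpers above and bail out, rather than sleep, when a
* reset or suspend holds the lock. example_read_reg() is hypothetical;
* wil_r() is the driver's 32-bit register read.
*/
static int example_read_reg(struct wil6210_priv *wil, u32 addr, u32 *val)
{
int rc = wil_mem_access_lock(wil);
if (rc) /* -EBUSY: reset or suspend in progress */
return rc;
*val = wil_r(wil, addr);
wil_mem_access_unlock(wil);
return 0;
}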
static void wil_ring_fini_tx(struct wil6210_priv *wil, int id)
{
struct wil_ring *ring = &wil->ring_tx[id];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
lockdep_assert_held(&wil->mutex);
if (!ring->va)
return;
wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
spin_lock_bh(&txdata->lock);
txdata->dot1x_open = false;
txdata->mid = U8_MAX;
txdata->enabled = 0; /* no Tx can be in progress or start anew */
spin_unlock_bh(&txdata->lock);
/* napi_synchronize waits for completion of the current NAPI but will
* not prevent the next NAPI run.
* Add a memory barrier to guarantee that txdata->enabled is zeroed
* before napi_synchronize so that the next scheduled NAPI will not
* handle this vring
*/
wmb();
/* make sure NAPI won't touch this vring */
if (test_bit(wil_status_napi_en, wil->status))
napi_synchronize(&wil->napi_tx);
wil->txrx_ops.ring_fini_tx(wil, ring);
}
static bool wil_vif_is_connected(struct wil6210_priv *wil, u8 mid)
{
int i;
for (i = 0; i < wil->max_assoc_sta; i++) {
if (wil->sta[i].mid == mid &&
wil->sta[i].status == wil_sta_connected)
return true;
}
return false;
}
static void wil_disconnect_cid_complete(struct wil6210_vif *vif, int cid,
u16 reason_code)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
uint i;
struct wil6210_priv *wil = vif_to_wil(vif);
struct net_device *ndev = vif_to_ndev(vif);
struct wireless_dev *wdev = vif_to_wdev(vif);
struct wil_sta_info *sta = &wil->sta[cid];
int min_ring_id = wil_get_min_tx_ring_id(wil);
might_sleep();
wil_dbg_misc(wil,
"disconnect_cid_complete: CID %d, MID %d, status %d\n",
cid, sta->mid, sta->status);
/* inform upper layers */
if (sta->status != wil_sta_unused) {
if (vif->mid != sta->mid) {
wil_err(wil, "STA MID mismatch with VIF MID(%d)\n",
vif->mid);
}
switch (wdev->iftype) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
/* AP-like interface */
cfg80211_del_sta(ndev, sta->addr, GFP_KERNEL);
break;
default:
break;
}
sta->status = wil_sta_unused;
sta->mid = U8_MAX;
}
/* reorder buffers */
for (i = 0; i < WIL_STA_TID_NUM; i++) {
struct wil_tid_ampdu_rx *r;
spin_lock_bh(&sta->tid_rx_lock);
r = sta->tid_rx[i];
sta->tid_rx[i] = NULL;
wil_tid_ampdu_rx_free(wil, r);
spin_unlock_bh(&sta->tid_rx_lock);
}
/* crypto context */
memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx));
memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx));
/* release vrings */
for (i = min_ring_id; i < ARRAY_SIZE(wil->ring_tx); i++) {
if (wil->ring2cid_tid[i][0] == cid)
wil_ring_fini_tx(wil, i);
}
/* statistics */
memset(&sta->stats, 0, sizeof(sta->stats));
sta->stats.tx_latency_min_us = U32_MAX;
}
static void _wil6210_disconnect_complete(struct wil6210_vif *vif,
const u8 *bssid, u16 reason_code)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int cid = -ENOENT;
struct net_device *ndev;
struct wireless_dev *wdev;
ndev = vif_to_ndev(vif);
wdev = vif_to_wdev(vif);
might_sleep();
wil_info(wil, "disconnect_complete: bssid=%pM, reason=%d\n",
bssid, reason_code);
/* Cases are:
* - disconnect single STA, still connected
* - disconnect single STA, already disconnected
* - disconnect all
*
* For "disconnect all", there are 3 options:
* - bssid == NULL
* - bssid is broadcast address (ff:ff:ff:ff:ff:ff)
* - bssid is our MAC address
*/
if (bssid && !is_broadcast_ether_addr(bssid) &&
!ether_addr_equal_unaligned(ndev->dev_addr, bssid)) {
cid = wil_find_cid(wil, vif->mid, bssid);
wil_dbg_misc(wil,
"Disconnect complete %pM, CID=%d, reason=%d\n",
bssid, cid, reason_code);
if (wil_cid_valid(wil, cid)) /* disconnect 1 peer */
wil_disconnect_cid_complete(vif, cid, reason_code);
} else { /* all */
wil_dbg_misc(wil, "Disconnect complete all\n");
for (cid = 0; cid < wil->max_assoc_sta; cid++)
wil_disconnect_cid_complete(vif, cid, reason_code);
}
/* link state */
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
wil_bcast_fini(vif);
wil_update_net_queues_bh(wil, vif, NULL, true);
netif_carrier_off(ndev);
if (!wil_has_other_active_ifaces(wil, ndev, false, true))
wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
if (test_and_clear_bit(wil_vif_fwconnected, vif->status)) {
atomic_dec(&wil->connected_vifs);
cfg80211_disconnected(ndev, reason_code,
NULL, 0,
vif->locally_generated_disc,
GFP_KERNEL);
vif->locally_generated_disc = false;
} else if (test_bit(wil_vif_fwconnecting, vif->status)) {
cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
GFP_KERNEL);
vif->bss = NULL;
}
clear_bit(wil_vif_fwconnecting, vif->status);
clear_bit(wil_vif_ft_roam, vif->status);
vif->ptk_rekey_state = WIL_REKEY_IDLE;
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
if (!wil_vif_is_connected(wil, vif->mid)) {
wil_update_net_queues_bh(wil, vif, NULL, true);
if (test_and_clear_bit(wil_vif_fwconnected,
vif->status))
atomic_dec(&wil->connected_vifs);
} else {
wil_update_net_queues_bh(wil, vif, NULL, false);
}
break;
default:
break;
}
}
static int wil_disconnect_cid(struct wil6210_vif *vif, int cid,
u16 reason_code)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wireless_dev *wdev = vif_to_wdev(vif);
struct wil_sta_info *sta = &wil->sta[cid];
bool del_sta = false;
might_sleep();
wil_dbg_misc(wil, "disconnect_cid: CID %d, MID %d, status %d\n",
cid, sta->mid, sta->status);
if (sta->status == wil_sta_unused)
return 0;
if (vif->mid != sta->mid) {
wil_err(wil, "STA MID mismatch with VIF MID(%d)\n", vif->mid);
return -EINVAL;
}
/* inform lower layers */
if (wdev->iftype == NL80211_IFTYPE_AP && disable_ap_sme)
del_sta = true;
/* Disconnect by sending a disconnect/del_sta command and waiting
* synchronously for the WMI_DISCONNECT_EVENTID event.
*/
return wmi_disconnect_sta(vif, sta->addr, reason_code, del_sta);
}
static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
u16 reason_code)
{
struct wil6210_priv *wil;
struct net_device *ndev;
int cid = -ENOENT;
if (unlikely(!vif))
return;
wil = vif_to_wil(vif);
ndev = vif_to_ndev(vif);
might_sleep();
wil_info(wil, "disconnect bssid=%pM, reason=%d\n", bssid, reason_code);
/* Cases are:
* - disconnect single STA, still connected
* - disconnect single STA, already disconnected
* - disconnect all
*
* For "disconnect all", there are 3 options:
* - bssid == NULL
* - bssid is broadcast address (ff:ff:ff:ff:ff:ff)
* - bssid is our MAC address
*/
if (bssid && !is_broadcast_ether_addr(bssid) &&
!ether_addr_equal_unaligned(ndev->dev_addr, bssid)) {
cid = wil_find_cid(wil, vif->mid, bssid);
wil_dbg_misc(wil, "Disconnect %pM, CID=%d, reason=%d\n",
bssid, cid, reason_code);
if (wil_cid_valid(wil, cid)) /* disconnect 1 peer */
wil_disconnect_cid(vif, cid, reason_code);
} else { /* all */
wil_dbg_misc(wil, "Disconnect all\n");
for (cid = 0; cid < wil->max_assoc_sta; cid++)
wil_disconnect_cid(vif, cid, reason_code);
}
/* call event handler manually after processing wmi_call,
* to avoid deadlock - disconnect event handler acquires
* wil->mutex while it is already held here
*/
_wil6210_disconnect_complete(vif, bssid, reason_code);
}
void wil_disconnect_worker(struct work_struct *work)
{
struct wil6210_vif *vif = container_of(work,
struct wil6210_vif, disconnect_worker);
struct wil6210_priv *wil = vif_to_wil(vif);
struct net_device *ndev = vif_to_ndev(vif);
int rc;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_disconnect_event evt;
} __packed reply;
if (test_bit(wil_vif_fwconnected, vif->status))
/* connect succeeded after all */
return;
if (!test_bit(wil_vif_fwconnecting, vif->status))
/* already disconnected */
return;
memset(&reply, 0, sizeof(reply));
rc = wmi_call(wil, WMI_DISCONNECT_CMDID, vif->mid, NULL, 0,
WMI_DISCONNECT_EVENTID, &reply, sizeof(reply),
WIL6210_DISCONNECT_TO_MS);
if (rc) {
wil_err(wil, "disconnect error %d\n", rc);
return;
}
wil_update_net_queues_bh(wil, vif, NULL, true);
netif_carrier_off(ndev);
cfg80211_connect_result(ndev, NULL, NULL, 0, NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL);
clear_bit(wil_vif_fwconnecting, vif->status);
}
static int wil_wait_for_recovery(struct wil6210_priv *wil)
{
if (wait_event_interruptible(wil->wq, wil->recovery_state !=
fw_recovery_pending)) {
wil_err(wil, "Interrupt, canceling recovery\n");
return -ERESTARTSYS;
}
if (wil->recovery_state != fw_recovery_running) {
wil_info(wil, "Recovery cancelled\n");
return -EINTR;
}
wil_info(wil, "Proceed with recovery\n");
return 0;
}
void wil_set_recovery_state(struct wil6210_priv *wil, int state)
{
wil_dbg_misc(wil, "set_recovery_state: %d -> %d\n",
wil->recovery_state, state);
wil->recovery_state = state;
wake_up_interruptible(&wil->wq);
}
bool wil_is_recovery_blocked(struct wil6210_priv *wil)
{
return no_fw_recovery && (wil->recovery_state == fw_recovery_pending);
}
static void wil_fw_error_worker(struct work_struct *work)
{
struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
fw_error_worker);
struct net_device *ndev = wil->main_ndev;
struct wireless_dev *wdev;
wil_dbg_misc(wil, "fw error worker\n");
if (!ndev || !(ndev->flags & IFF_UP)) {
wil_info(wil, "No recovery - interface is down\n");
return;
}
wdev = ndev->ieee80211_ptr;
/* increment @recovery_count if less than WIL6210_FW_RECOVERY_TO
* passed since the last recovery attempt
*/
if (time_is_after_jiffies(wil->last_fw_recovery +
WIL6210_FW_RECOVERY_TO))
wil->recovery_count++;
else
wil->recovery_count = 1; /* fw was alive for a long time */
if (wil->recovery_count > WIL6210_FW_RECOVERY_RETRIES) {
wil_err(wil, "too many recovery attempts (%d), giving up\n",
wil->recovery_count);
return;
}
wil->last_fw_recovery = jiffies;
wil_info(wil, "fw error recovery requested (try %d)...\n",
wil->recovery_count);
if (!no_fw_recovery)
wil->recovery_state = fw_recovery_running;
if (wil_wait_for_recovery(wil) != 0)
return;
rtnl_lock();
mutex_lock(&wil->mutex);
/* Needs adaptation for multiple VIFs:
* we would have to go over all VIFs and pick the appropriate
* recovery for each, because every VIF can have a different iftype.
*/
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_MONITOR:
/* silent recovery, upper layers will see disconnect */
__wil_down(wil);
__wil_up(wil);
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
if (no_fw_recovery) /* upper layers do recovery */
break;
/* silent recovery, upper layers will see disconnect */
__wil_down(wil);
__wil_up(wil);
mutex_unlock(&wil->mutex);
wil_cfg80211_ap_recovery(wil);
mutex_lock(&wil->mutex);
wil_info(wil, "... completed\n");
break;
default:
wil_err(wil, "No recovery - unknown interface type %d\n",
wdev->iftype);
break;
}
mutex_unlock(&wil->mutex);
rtnl_unlock();
}
static int wil_find_free_ring(struct wil6210_priv *wil)
{
int i;
int min_ring_id = wil_get_min_tx_ring_id(wil);
for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
if (!wil->ring_tx[i].va)
return i;
}
return -EINVAL;
}
int wil_ring_init_tx(struct wil6210_vif *vif, int cid)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc = -EINVAL, ringid;
if (cid < 0) {
wil_err(wil, "No connection pending\n");
goto out;
}
ringid = wil_find_free_ring(wil);
if (ringid < 0) {
wil_err(wil, "No free vring found\n");
goto out;
}
wil_dbg_wmi(wil, "Configure for connection CID %d MID %d ring %d\n",
cid, vif->mid, ringid);
rc = wil->txrx_ops.ring_init_tx(vif, ringid, 1 << tx_ring_order,
cid, 0);
if (rc)
wil_err(wil, "init TX for CID %d MID %d vring %d failed\n",
cid, vif->mid, ringid);
out:
return rc;
}
int wil_bcast_init(struct wil6210_vif *vif)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int ri = vif->bcast_ring, rc;
if (ri >= 0 && wil->ring_tx[ri].va)
return 0;
ri = wil_find_free_ring(wil);
if (ri < 0)
return ri;
vif->bcast_ring = ri;
rc = wil->txrx_ops.ring_init_bcast(vif, ri, 1 << bcast_ring_order);
if (rc)
vif->bcast_ring = -1;
return rc;
}
void wil_bcast_fini(struct wil6210_vif *vif)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int ri = vif->bcast_ring;
if (ri < 0)
return;
vif->bcast_ring = -1;
wil_ring_fini_tx(wil, ri);
}
void wil_bcast_fini_all(struct wil6210_priv *wil)
{
int i;
struct wil6210_vif *vif;
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (vif)
wil_bcast_fini(vif);
}
}
int wil_priv_init(struct wil6210_priv *wil)
{
uint i;
wil_dbg_misc(wil, "priv_init\n");
memset(wil->sta, 0, sizeof(wil->sta));
for (i = 0; i < WIL6210_MAX_CID; i++) {
spin_lock_init(&wil->sta[i].tid_rx_lock);
wil->sta[i].mid = U8_MAX;
}
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
spin_lock_init(&wil->ring_tx_data[i].lock);
wil->ring2cid_tid[i][0] = WIL6210_MAX_CID;
}
mutex_init(&wil->mutex);
mutex_init(&wil->vif_mutex);
mutex_init(&wil->wmi_mutex);
mutex_init(&wil->halp.lock);
init_completion(&wil->wmi_ready);
init_completion(&wil->wmi_call);
init_completion(&wil->halp.comp);
INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker);
INIT_LIST_HEAD(&wil->pending_wmi_ev);
spin_lock_init(&wil->wmi_ev_lock);
spin_lock_init(&wil->net_queue_lock);
spin_lock_init(&wil->eap_lock);
init_waitqueue_head(&wil->wq);
init_rwsem(&wil->mem_lock);
wil->wmi_wq = create_singlethread_workqueue(WIL_NAME "_wmi");
if (!wil->wmi_wq)
return -EAGAIN;
wil->wq_service = create_singlethread_workqueue(WIL_NAME "_service");
if (!wil->wq_service)
goto out_wmi_wq;
wil->last_fw_recovery = jiffies;
wil->tx_interframe_timeout = WIL6210_ITR_TX_INTERFRAME_TIMEOUT_DEFAULT;
wil->rx_interframe_timeout = WIL6210_ITR_RX_INTERFRAME_TIMEOUT_DEFAULT;
wil->tx_max_burst_duration = WIL6210_ITR_TX_MAX_BURST_DURATION_DEFAULT;
wil->rx_max_burst_duration = WIL6210_ITR_RX_MAX_BURST_DURATION_DEFAULT;
if (rx_ring_overflow_thrsh == WIL6210_RX_HIGH_TRSH_INIT)
rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_DEFAULT;
wil->ps_profile = WMI_PS_PROFILE_TYPE_DEFAULT;
wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
WMI_WAKEUP_TRIGGER_BCAST;
memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
wil->ring_idle_trsh = 16;
wil->reply_mid = U8_MAX;
wil->max_vifs = 1;
wil->max_assoc_sta = max_assoc_sta;
/* edma configuration can be updated via debugfs before allocation */
wil->num_rx_status_rings = WIL_DEFAULT_NUM_RX_STATUS_RINGS;
wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;
/* Rx status ring size should be bigger than the number of RX buffers
* in order to prevent backpressure on the status ring, which may
* cause HW freeze.
*/
wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;
/* Number of RX buffer IDs should be bigger than the RX descriptor
* ring size as in HW reorder flow, the HW can consume additional
* buffers before releasing the previous ones.
*/
wil->rx_buff_id_count = WIL_RX_BUFF_ARR_SIZE_DEFAULT;
wil->amsdu_en = true;
return 0;
out_wmi_wq:
destroy_workqueue(wil->wmi_wq);
return -EAGAIN;
}
void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps)
{
if (wil->platform_ops.bus_request) {
wil->bus_request_kbps = kbps;
wil->platform_ops.bus_request(wil->platform_handle, kbps);
}
}
/**
* wil6210_disconnect - disconnect one connection
* @vif: virtual interface context
* @bssid: peer to disconnect, NULL to disconnect all
* @reason_code: Reason code for the Disassociation frame
*
* Disconnect and release associated resources. Issue WMI
* command(s) to trigger the MAC disconnect. When the command has been
* issued successfully, wil6210_disconnect_complete() is called
* to handle the event synchronously.
*/
void wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
u16 reason_code)
{
struct wil6210_priv *wil = vif_to_wil(vif);
wil_dbg_misc(wil, "disconnecting\n");
del_timer_sync(&vif->connect_timer);
_wil6210_disconnect(vif, bssid, reason_code);
}
/**
* wil6210_disconnect_complete - handle disconnect event
* @vif: virtual interface context
* @bssid: peer to disconnect, NULL to disconnect all
* @reason_code: Reason code for the Disassociation frame
*
* Release associated resources and indicate to upper layers that the
* connection is terminated.
*/
void wil6210_disconnect_complete(struct wil6210_vif *vif, const u8 *bssid,
u16 reason_code)
{
struct wil6210_priv *wil = vif_to_wil(vif);
wil_dbg_misc(wil, "got disconnect\n");
del_timer_sync(&vif->connect_timer);
_wil6210_disconnect_complete(vif, bssid, reason_code);
}
void wil_priv_deinit(struct wil6210_priv *wil)
{
wil_dbg_misc(wil, "priv_deinit\n");
wil_set_recovery_state(wil, fw_recovery_idle);
cancel_work_sync(&wil->fw_error_worker);
wmi_event_flush(wil);
destroy_workqueue(wil->wq_service);
destroy_workqueue(wil->wmi_wq);
kfree(wil->brd_info);
}
static void wil_shutdown_bl(struct wil6210_priv *wil)
{
u32 val;
wil_s(wil, RGF_USER_BL +
offsetof(struct bl_dedicated_registers_v1,
bl_shutdown_handshake), BL_SHUTDOWN_HS_GRTD);
usleep_range(100, 150);
val = wil_r(wil, RGF_USER_BL +
offsetof(struct bl_dedicated_registers_v1,
bl_shutdown_handshake));
if (val & BL_SHUTDOWN_HS_RTD) {
wil_dbg_misc(wil, "BL is ready for halt\n");
return;
}
wil_err(wil, "BL did not report ready for halt\n");
}
/* this format is used by the ARC embedded CPU for instruction memory */
static inline u32 ARC_me_imm32(u32 d)
{
return ((d & 0xffff0000) >> 16) | ((d & 0x0000ffff) << 16);
}
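/* Example: ARC instruction memory is "middle-endian" - a 32-bit immediate is
* stored with its two 16-bit halves swapped, so
* ARC_me_imm32(0x12345678) == 0x56781234.
*/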
/* defines access to interrupt vectors for wil_freeze_bl */
#define ARC_IRQ_VECTOR_OFFSET(N) ((N) * 8)
/* ARC long jump instruction */
#define ARC_JAL_INST (0x20200f80)
static void wil_freeze_bl(struct wil6210_priv *wil)
{
u32 jal, upc, saved;
u32 ivt3 = ARC_IRQ_VECTOR_OFFSET(3);
jal = wil_r(wil, wil->iccm_base + ivt3);
if (jal != ARC_me_imm32(ARC_JAL_INST)) {
wil_dbg_misc(wil, "invalid IVT entry found, skipping\n");
return;
}
/* prevent the target from entering deep sleep
* and disabling memory access
*/
saved = wil_r(wil, RGF_USER_USAGE_8);
wil_w(wil, RGF_USER_USAGE_8, saved | BIT_USER_PREVENT_DEEP_SLEEP);
usleep_range(20, 25); /* let the BL process the bit */
/* redirect to endless loop in the INT_L1 context and let it trap */
wil_w(wil, wil->iccm_base + ivt3 + 4, ARC_me_imm32(ivt3));
usleep_range(20, 25); /* let the BL get into the trap */
/* verify the BL is frozen */
upc = wil_r(wil, RGF_USER_CPU_PC);
if (upc < ivt3 || (upc > (ivt3 + 8)))
wil_dbg_misc(wil, "BL freeze failed, PC=0x%08X\n", upc);
wil_w(wil, RGF_USER_USAGE_8, saved);
}
static void wil_bl_prepare_halt(struct wil6210_priv *wil)
{
u32 tmp, ver;
/* Before halting the device CPU, the driver must make sure the BL is not
* accessing host memory. This is done differently depending on the BL
* version:
* 1. For very old BL versions the procedure is skipped
* (not supported).
* 2. For old BL versions we use a special trick to freeze the BL.
* 3. For new BL versions we shut down the BL using a handshake procedure.
*/
tmp = wil_r(wil, RGF_USER_BL +
offsetof(struct bl_dedicated_registers_v0,
boot_loader_struct_version));
if (!tmp) {
wil_dbg_misc(wil, "old BL, skipping halt preparation\n");
return;
}
tmp = wil_r(wil, RGF_USER_BL +
offsetof(struct bl_dedicated_registers_v1,
bl_shutdown_handshake));
ver = BL_SHUTDOWN_HS_PROT_VER(tmp);
if (ver > 0)
wil_shutdown_bl(wil);
else
wil_freeze_bl(wil);
}
static inline void wil_halt_cpu(struct wil6210_priv *wil)
{
if (wil->hw_version >= HW_VER_TALYN_MB) {
wil_w(wil, RGF_USER_USER_CPU_0_TALYN_MB,
BIT_USER_USER_CPU_MAN_RST);
wil_w(wil, RGF_USER_MAC_CPU_0_TALYN_MB,
BIT_USER_MAC_CPU_MAN_RST);
} else {
wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
wil_w(wil, RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST);
}
}
static inline void wil_release_cpu(struct wil6210_priv *wil)
{
/* Start CPU */
if (wil->hw_version >= HW_VER_TALYN_MB)
wil_w(wil, RGF_USER_USER_CPU_0_TALYN_MB, 1);
else
wil_w(wil, RGF_USER_USER_CPU_0, 1);
}
static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode)
{
wil_info(wil, "oob_mode to %d\n", mode);
switch (mode) {
case 0:
wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE |
BIT_USER_OOB_R2_MODE);
break;
case 1:
wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_R2_MODE);
wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
break;
case 2:
wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_R2_MODE);
break;
default:
wil_err(wil, "invalid oob_mode: %d\n", mode);
}
}
static int wil_wait_device_ready(struct wil6210_priv *wil, int no_flash)
{
int delay = 0;
u32 x, x1 = 0;
/* wait until device ready. */
if (no_flash) {
msleep(PMU_READY_DELAY_MS);
wil_dbg_misc(wil, "Reset completed\n");
} else {
do {
msleep(RST_DELAY);
x = wil_r(wil, RGF_USER_BL +
offsetof(struct bl_dedicated_registers_v0,
boot_loader_ready));
if (x1 != x) {
wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n",
x1, x);
x1 = x;
}
if (delay++ > RST_COUNT) {
wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
x);
return -ETIME;
}
} while (x != BL_READY);
wil_dbg_misc(wil, "Reset completed in %d ms\n",
delay * RST_DELAY);
}
return 0;
}
static int wil_wait_device_ready_talyn_mb(struct wil6210_priv *wil)
{
u32 otp_hw;
u8 signature_status;
bool otp_signature_err;
bool hw_section_done;
u32 otp_qc_secured;
int delay = 0;
/* Wait for OTP signature test to complete */
usleep_range(2000, 2200);
wil->boot_config = WIL_BOOT_ERR;
/* Poll until OTP signature status is valid.
* In vanilla and development modes, when signature test is complete
* HW sets BIT_OTP_SIGNATURE_ERR_TALYN_MB.
* In production mode BIT_OTP_SIGNATURE_ERR_TALYN_MB remains 0, poll
* for signature status change to 2 or 3.
*/
do {
otp_hw = wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1);
signature_status = WIL_GET_BITS(otp_hw, 8, 9);
otp_signature_err = otp_hw & BIT_OTP_SIGNATURE_ERR_TALYN_MB;
if (otp_signature_err &&
signature_status == WIL_SIG_STATUS_VANILLA) {
wil->boot_config = WIL_BOOT_VANILLA;
break;
}
if (otp_signature_err &&
signature_status == WIL_SIG_STATUS_DEVELOPMENT) {
wil->boot_config = WIL_BOOT_DEVELOPMENT;
break;
}
if (!otp_signature_err &&
signature_status == WIL_SIG_STATUS_PRODUCTION) {
wil->boot_config = WIL_BOOT_PRODUCTION;
break;
}
if (!otp_signature_err &&
signature_status ==
WIL_SIG_STATUS_CORRUPTED_PRODUCTION) {
/* Unrecognized OTP signature found. Possibly a
* corrupted production signature, access control
* is applied as in production mode, therefore
* do not fail
*/
wil->boot_config = WIL_BOOT_PRODUCTION;
break;
}
if (delay++ > OTP_HW_COUNT)
break;
usleep_range(OTP_HW_DELAY, OTP_HW_DELAY + 10);
} while (!otp_signature_err && signature_status == 0);
if (wil->boot_config == WIL_BOOT_ERR) {
wil_err(wil,
"invalid boot config, signature_status %d otp_signature_err %d\n",
signature_status, otp_signature_err);
return -ETIME;
}
wil_dbg_misc(wil,
"signature test done in %d usec, otp_hw 0x%x, boot_config %d\n",
delay * OTP_HW_DELAY, otp_hw, wil->boot_config);
if (wil->boot_config == WIL_BOOT_VANILLA)
/* Assuming not SPI boot (currently not supported) */
goto out;
hw_section_done = otp_hw & BIT_OTP_HW_SECTION_DONE_TALYN_MB;
delay = 0;
while (!hw_section_done) {
msleep(RST_DELAY);
otp_hw = wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1);
hw_section_done = otp_hw & BIT_OTP_HW_SECTION_DONE_TALYN_MB;
if (delay++ > RST_COUNT) {
wil_err(wil, "TO waiting for hw_section_done\n");
return -ETIME;
}
}
wil_dbg_misc(wil, "HW section done in %d ms\n", delay * RST_DELAY);
otp_qc_secured = wil_r(wil, RGF_OTP_QC_SECURED);
wil->secured_boot = otp_qc_secured & BIT_BOOT_FROM_ROM ? 1 : 0;
wil_dbg_misc(wil, "secured boot is %sabled\n",
wil->secured_boot ? "en" : "dis");
out:
wil_dbg_misc(wil, "Reset completed\n");
return 0;
}
static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
{
u32 x;
int rc;
wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
if (wil->hw_version < HW_VER_TALYN) {
/* Clear MAC link up */
wil_s(wil, RGF_HP_CTRL, BIT(15));
wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0,
BIT_HPAL_PERST_FROM_PAD);
wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
}
wil_halt_cpu(wil);
if (!no_flash) {
/* clear all boot loader "ready" bits */
wil_w(wil, RGF_USER_BL +
offsetof(struct bl_dedicated_registers_v0,
boot_loader_ready), 0);
/* this should be safe to write even with old BLs */
wil_w(wil, RGF_USER_BL +
offsetof(struct bl_dedicated_registers_v1,
bl_shutdown_handshake), 0);
}
/* Clear Fw Download notification */
wil_c(wil, RGF_USER_USAGE_6, BIT(0));
wil_s(wil, RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
/* XTAL stabilization should take about 3ms */
usleep_range(5000, 7000);
x = wil_r(wil, RGF_CAF_PLL_LOCK_STATUS);
if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) {
wil_err(wil, "Xtal stabilization timeout\n"
"RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x);
return -ETIME;
}
/* switch 10k to XTAL */
wil_c(wil, RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
/* 40 MHz */
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
if (wil->hw_version >= HW_VER_TALYN_MB) {
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x7e000000);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003f);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0xc00000f0);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xffe7fe00);
} else {
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xfe000000);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003f);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xffe7fe00);
}
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
/* reset A2 PCIE AHB */
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
if (wil->hw_version == HW_VER_TALYN_MB)
rc = wil_wait_device_ready_talyn_mb(wil);
else
rc = wil_wait_device_ready(wil, no_flash);
if (rc)
return rc;
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
/* enable fix for HW bug related to the SA/DA swap in AP Rx */
wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
if (wil->hw_version < HW_VER_TALYN_MB && no_flash) {
/* Reset OTP HW vectors to fit 40MHz */
wil_w(wil, RGF_USER_XPM_IFC_RD_TIME1, 0x60001);
wil_w(wil, RGF_USER_XPM_IFC_RD_TIME2, 0x20027);
wil_w(wil, RGF_USER_XPM_IFC_RD_TIME3, 0x1);
wil_w(wil, RGF_USER_XPM_IFC_RD_TIME4, 0x20027);
wil_w(wil, RGF_USER_XPM_IFC_RD_TIME5, 0x30003);
wil_w(wil, RGF_USER_XPM_IFC_RD_TIME6, 0x20002);
wil_w(wil, RGF_USER_XPM_IFC_RD_TIME7, 0x60001);
wil_w(wil, RGF_USER_XPM_IFC_RD_TIME8, 0x60001);
wil_w(wil, RGF_USER_XPM_IFC_RD_TIME9, 0x60001);
wil_w(wil, RGF_USER_XPM_IFC_RD_TIME10, 0x60001);
wil_w(wil, RGF_USER_XPM_RD_DOUT_SAMPLE_TIME, 0x57);
}
return 0;
}
static void wil_collect_fw_info(struct wil6210_priv *wil)
{
struct wiphy *wiphy = wil_to_wiphy(wil);
u8 retry_short;
int rc;
wil_refresh_fw_capabilities(wil);
rc = wmi_get_mgmt_retry(wil, &retry_short);
if (!rc) {
wiphy->retry_short = retry_short;
wil_dbg_misc(wil, "FW retry_short: %d\n", retry_short);
}
}
void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
{
struct wiphy *wiphy = wil_to_wiphy(wil);
int features;
wil->keep_radio_on_during_sleep =
test_bit(WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND,
wil->platform_capa) &&
test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
wil->keep_radio_on_during_sleep);
if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
else
wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
if (test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities)) {
wiphy->max_sched_scan_reqs = 1;
wiphy->max_sched_scan_ssids = WMI_MAX_PNO_SSID_NUM;
wiphy->max_match_sets = WMI_MAX_PNO_SSID_NUM;
wiphy->max_sched_scan_ie_len = WMI_MAX_IE_LEN;
wiphy->max_sched_scan_plans = WMI_MAX_PLANS_NUM;
}
if (test_bit(WMI_FW_CAPABILITY_TX_REQ_EXT, wil->fw_capabilities))
wiphy->flags |= WIPHY_FLAG_OFFCHAN_TX;
if (wil->platform_ops.set_features) {
features = (test_bit(WMI_FW_CAPABILITY_REF_CLOCK_CONTROL,
wil->fw_capabilities) &&
test_bit(WIL_PLATFORM_CAPA_EXT_CLK,
wil->platform_capa)) ?
BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL) : 0;
if (wil->n_msi == 3)
features |= BIT(WIL_PLATFORM_FEATURE_TRIPLE_MSI);
wil->platform_ops.set_features(wil->platform_handle, features);
}
if (test_bit(WMI_FW_CAPABILITY_BACK_WIN_SIZE_64,
wil->fw_capabilities)) {
wil->max_agg_wsize = WIL_MAX_AGG_WSIZE_64;
wil->max_ampdu_size = WIL_MAX_AMPDU_SIZE_128;
} else {
wil->max_agg_wsize = WIL_MAX_AGG_WSIZE;
wil->max_ampdu_size = WIL_MAX_AMPDU_SIZE;
}
update_supported_bands(wil);
}
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
{
le32_to_cpus(&r->base);
le16_to_cpus(&r->entry_size);
le16_to_cpus(&r->size);
le32_to_cpus(&r->tail);
le32_to_cpus(&r->head);
}
/* construct actual board file name to use */
void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len)
{
const char *board_file;
const char *wil_talyn_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN :
WIL_FW_NAME_TALYN;
if (wil->board_file) {
board_file = wil->board_file;
} else {
/* If specific FW file is used for Talyn,
* use specific board file
*/
if (strcmp(wil->wil_fw_name, wil_talyn_fw_name) == 0)
board_file = WIL_BRD_NAME_TALYN;
else
board_file = WIL_BOARD_FILE_NAME;
}
strscpy(buf, board_file, len);
}
static int wil_get_bl_info(struct wil6210_priv *wil)
{
struct net_device *ndev = wil->main_ndev;
struct wiphy *wiphy = wil_to_wiphy(wil);
union {
struct bl_dedicated_registers_v0 bl0;
struct bl_dedicated_registers_v1 bl1;
} bl;
u32 bl_ver;
u8 *mac;
u16 rf_status;
wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL),
sizeof(bl));
bl_ver = le32_to_cpu(bl.bl0.boot_loader_struct_version);
mac = bl.bl0.mac_address;
if (bl_ver == 0) {
le32_to_cpus(&bl.bl0.rf_type);
le32_to_cpus(&bl.bl0.baseband_type);
rf_status = 0; /* actually, unknown */
wil_info(wil,
"Boot Loader struct v%d: MAC = %pM RF = 0x%08x bband = 0x%08x\n",
bl_ver, mac,
bl.bl0.rf_type, bl.bl0.baseband_type);
wil_info(wil, "Boot Loader build unknown for struct v0\n");
} else {
le16_to_cpus(&bl.bl1.rf_type);
rf_status = le16_to_cpu(bl.bl1.rf_status);
le32_to_cpus(&bl.bl1.baseband_type);
le16_to_cpus(&bl.bl1.bl_version_subminor);
le16_to_cpus(&bl.bl1.bl_version_build);
wil_info(wil,
"Boot Loader struct v%d: MAC = %pM RF = 0x%04x (status 0x%04x) bband = 0x%08x\n",
bl_ver, mac,
bl.bl1.rf_type, rf_status,
bl.bl1.baseband_type);
wil_info(wil, "Boot Loader build %d.%d.%d.%d\n",
bl.bl1.bl_version_major, bl.bl1.bl_version_minor,
bl.bl1.bl_version_subminor, bl.bl1.bl_version_build);
}
if (!is_valid_ether_addr(mac)) {
wil_err(wil, "BL: Invalid MAC %pM\n", mac);
return -EINVAL;
}
ether_addr_copy(ndev->perm_addr, mac);
ether_addr_copy(wiphy->perm_addr, mac);
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_set(ndev, mac);
if (rf_status) {/* bad RF cable? */
wil_err(wil, "RF communication error 0x%04x",
rf_status);
return -EAGAIN;
}
return 0;
}
static void wil_bl_crash_info(struct wil6210_priv *wil, bool is_err)
{
u32 bl_assert_code, bl_assert_blink, bl_magic_number;
u32 bl_ver = wil_r(wil, RGF_USER_BL +
offsetof(struct bl_dedicated_registers_v0,
boot_loader_struct_version));
if (bl_ver < 2)
return;
bl_assert_code = wil_r(wil, RGF_USER_BL +
offsetof(struct bl_dedicated_registers_v1,
bl_assert_code));
bl_assert_blink = wil_r(wil, RGF_USER_BL +
offsetof(struct bl_dedicated_registers_v1,
bl_assert_blink));
bl_magic_number = wil_r(wil, RGF_USER_BL +
offsetof(struct bl_dedicated_registers_v1,
bl_magic_number));
if (is_err) {
wil_err(wil,
"BL assert code 0x%08x blink 0x%08x magic 0x%08x\n",
bl_assert_code, bl_assert_blink, bl_magic_number);
} else {
wil_dbg_misc(wil,
"BL assert code 0x%08x blink 0x%08x magic 0x%08x\n",
bl_assert_code, bl_assert_blink, bl_magic_number);
}
}
static int wil_get_otp_info(struct wil6210_priv *wil)
{
struct net_device *ndev = wil->main_ndev;
struct wiphy *wiphy = wil_to_wiphy(wil);
u8 mac[8];
int mac_addr;
/* OEM MAC has precedence */
mac_addr = RGF_OTP_OEM_MAC;
wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr), sizeof(mac));
if (is_valid_ether_addr(mac)) {
wil_info(wil, "using OEM MAC %pM\n", mac);
} else {
if (wil->hw_version >= HW_VER_TALYN_MB)
mac_addr = RGF_OTP_MAC_TALYN_MB;
else
mac_addr = RGF_OTP_MAC;
wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr),
sizeof(mac));
}
if (!is_valid_ether_addr(mac)) {
wil_err(wil, "Invalid MAC %pM\n", mac);
return -EINVAL;
}
ether_addr_copy(ndev->perm_addr, mac);
ether_addr_copy(wiphy->perm_addr, mac);
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_set(ndev, mac);
return 0;
}
static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
{
ulong to = msecs_to_jiffies(2000);
ulong left = wait_for_completion_timeout(&wil->wmi_ready, to);
if (left == 0) {
wil_err(wil, "Firmware not ready\n");
return -ETIME;
}
wil_info(wil, "FW ready after %d ms. HW version 0x%08x\n",
jiffies_to_msecs(to - left), wil->hw_version);
return 0;
}
void wil_abort_scan(struct wil6210_vif *vif, bool sync)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
struct cfg80211_scan_info info = {
.aborted = true,
};
lockdep_assert_held(&wil->vif_mutex);
if (!vif->scan_request)
return;
wil_dbg_misc(wil, "Abort scan_request 0x%p\n", vif->scan_request);
del_timer_sync(&vif->scan_timer);
mutex_unlock(&wil->vif_mutex);
rc = wmi_abort_scan(vif);
if (!rc && sync)
wait_event_interruptible_timeout(wil->wq, !vif->scan_request,
msecs_to_jiffies(
WAIT_FOR_SCAN_ABORT_MS));
mutex_lock(&wil->vif_mutex);
if (vif->scan_request) {
cfg80211_scan_done(vif->scan_request, &info);
vif->scan_request = NULL;
}
}
void wil_abort_scan_all_vifs(struct wil6210_priv *wil, bool sync)
{
int i;
lockdep_assert_held(&wil->vif_mutex);
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
struct wil6210_vif *vif = wil->vifs[i];
if (vif)
wil_abort_scan(vif, sync);
}
}
int wil_ps_update(struct wil6210_priv *wil, enum wmi_ps_profile_type ps_profile)
{
int rc;
if (!test_bit(WMI_FW_CAPABILITY_PS_CONFIG, wil->fw_capabilities)) {
wil_err(wil, "set_power_mgmt not supported\n");
return -EOPNOTSUPP;
}
rc = wmi_ps_dev_profile_cfg(wil, ps_profile);
if (rc)
wil_err(wil, "wmi_ps_dev_profile_cfg failed (%d)\n", rc);
else
wil->ps_profile = ps_profile;
return rc;
}
static void wil_pre_fw_config(struct wil6210_priv *wil)
{
wil_clear_fw_log_addr(wil);
/* Mark FW as loaded from host */
wil_s(wil, RGF_USER_USAGE_6, 1);
/* clear any interrupts which on-card-firmware
* may have set
*/
wil6210_clear_irq(wil);
/* CAF_ICR - clear and mask */
/* it is W1C, clear by writing back same value */
if (wil->hw_version < HW_VER_TALYN_MB) {
wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
}
/* clear PAL_UNIT_ICR (potential D0->D3 leftover)
* In Talyn-MB host cannot access this register due to
* access control, hence PAL_UNIT_ICR is cleared by the FW
*/
if (wil->hw_version < HW_VER_TALYN_MB)
wil_s(wil, RGF_PAL_UNIT_ICR + offsetof(struct RGF_ICR, ICR),
0);
if (wil->fw_calib_result > 0) {
__le32 val = cpu_to_le32(wil->fw_calib_result |
(CALIB_RESULT_SIGNATURE << 8));
wil_w(wil, RGF_USER_FW_CALIB_RESULT, (u32 __force)val);
}
}
static int wil_restore_vifs(struct wil6210_priv *wil)
{
struct wil6210_vif *vif;
struct net_device *ndev;
struct wireless_dev *wdev;
int i, rc;
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (!vif)
continue;
vif->ap_isolate = 0;
if (vif->mid) {
ndev = vif_to_ndev(vif);
wdev = vif_to_wdev(vif);
rc = wmi_port_allocate(wil, vif->mid, ndev->dev_addr,
wdev->iftype);
if (rc) {
wil_err(wil, "fail to restore VIF %d type %d, rc %d\n",
i, wdev->iftype, rc);
return rc;
}
}
}
return 0;
}
/*
* Clear the FW and ucode log start addresses to indicate the FW log is not
* ready. The host driver clears the addresses before the FW starts, and the
* FW initializes them when it is ready to send logs.
*/
void wil_clear_fw_log_addr(struct wil6210_priv *wil)
{
/* FW log addr */
wil_w(wil, RGF_USER_USAGE_1, 0);
/* ucode log addr */
wil_w(wil, RGF_USER_USAGE_2, 0);
wil_dbg_misc(wil, "Cleared FW and ucode log address");
}
/*
* We reset all the structures, and we reset the UMAC.
* After calling this routine, you're expected to reload
* the firmware.
*/
int wil_reset(struct wil6210_priv *wil, bool load_fw)
{
int rc, i;
unsigned long status_flags = BIT(wil_status_resetting);
int no_flash;
struct wil6210_vif *vif;
wil_dbg_misc(wil, "reset\n");
WARN_ON(!mutex_is_locked(&wil->mutex));
WARN_ON(test_bit(wil_status_napi_en, wil->status));
if (debug_fw) {
static const u8 mac[ETH_ALEN] = {
0x00, 0xde, 0xad, 0x12, 0x34, 0x56,
};
struct net_device *ndev = wil->main_ndev;
ether_addr_copy(ndev->perm_addr, mac);
eth_hw_addr_set(ndev, ndev->perm_addr);
return 0;
}
if (wil->hw_version == HW_VER_UNKNOWN)
return -ENODEV;
if (test_bit(WIL_PLATFORM_CAPA_T_PWR_ON_0, wil->platform_capa) &&
wil->hw_version < HW_VER_TALYN_MB) {
wil_dbg_misc(wil, "Notify FW to set T_POWER_ON=0\n");
wil_s(wil, RGF_USER_USAGE_8, BIT_USER_SUPPORT_T_POWER_ON_0);
}
if (test_bit(WIL_PLATFORM_CAPA_EXT_CLK, wil->platform_capa)) {
wil_dbg_misc(wil, "Notify FW on ext clock configuration\n");
wil_s(wil, RGF_USER_USAGE_8, BIT_USER_EXT_CLK);
}
if (wil->platform_ops.notify) {
rc = wil->platform_ops.notify(wil->platform_handle,
WIL_PLATFORM_EVT_PRE_RESET);
if (rc)
wil_err(wil, "PRE_RESET platform notify failed, rc %d\n",
rc);
}
set_bit(wil_status_resetting, wil->status);
mutex_lock(&wil->vif_mutex);
wil_abort_scan_all_vifs(wil, false);
mutex_unlock(&wil->vif_mutex);
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (vif) {
cancel_work_sync(&vif->disconnect_worker);
wil6210_disconnect(vif, NULL,
WLAN_REASON_DEAUTH_LEAVING);
vif->ptk_rekey_state = WIL_REKEY_IDLE;
}
}
wil_bcast_fini_all(wil);
/* Disable device LED before reset */
wmi_led_cfg(wil, false);
down_write(&wil->mem_lock);
/* prevent NAPI from being scheduled and prevent wmi commands */
mutex_lock(&wil->wmi_mutex);
if (test_bit(wil_status_suspending, wil->status))
status_flags |= BIT(wil_status_suspending);
bitmap_and(wil->status, wil->status, &status_flags,
wil_status_last);
wil_dbg_misc(wil, "wil->status (0x%lx)\n", *wil->status);
mutex_unlock(&wil->wmi_mutex);
wil_mask_irq(wil);
wmi_event_flush(wil);
flush_workqueue(wil->wq_service);
flush_workqueue(wil->wmi_wq);
no_flash = test_bit(hw_capa_no_flash, wil->hw_capa);
if (!no_flash)
wil_bl_crash_info(wil, false);
wil_disable_irq(wil);
rc = wil_target_reset(wil, no_flash);
wil6210_clear_irq(wil);
wil_enable_irq(wil);
wil->txrx_ops.rx_fini(wil);
wil->txrx_ops.tx_fini(wil);
if (rc) {
if (!no_flash)
wil_bl_crash_info(wil, true);
goto out;
}
if (no_flash) {
rc = wil_get_otp_info(wil);
} else {
rc = wil_get_bl_info(wil);
if (rc == -EAGAIN && !load_fw)
/* ignore RF error if not going up */
rc = 0;
}
if (rc)
goto out;
wil_set_oob_mode(wil, oob_mode);
if (load_fw) {
char board_file[WIL_BOARD_FILE_MAX_NAMELEN];
if (wil->secured_boot) {
wil_err(wil, "secured boot is not supported\n");
up_write(&wil->mem_lock);
return -ENOTSUPP;
}
board_file[0] = '\0';
wil_get_board_file(wil, board_file, sizeof(board_file));
wil_info(wil, "Use firmware <%s> + board <%s>\n",
wil->wil_fw_name, board_file);
if (!no_flash)
wil_bl_prepare_halt(wil);
wil_halt_cpu(wil);
memset(wil->fw_version, 0, sizeof(wil->fw_version));
/* Loading f/w from the file */
rc = wil_request_firmware(wil, wil->wil_fw_name, true);
if (rc)
goto out;
if (wil->num_of_brd_entries)
rc = wil_request_board(wil, board_file);
else
rc = wil_request_firmware(wil, board_file, true);
if (rc)
goto out;
wil_pre_fw_config(wil);
wil_release_cpu(wil);
}
/* init after reset */
reinit_completion(&wil->wmi_ready);
reinit_completion(&wil->wmi_call);
reinit_completion(&wil->halp.comp);
clear_bit(wil_status_resetting, wil->status);
up_write(&wil->mem_lock);
if (load_fw) {
wil_unmask_irq(wil);
/* we just started MAC, wait for FW ready */
rc = wil_wait_for_fw_ready(wil);
if (rc)
return rc;
/* check FW is responsive */
rc = wmi_echo(wil);
if (rc) {
wil_err(wil, "wmi_echo failed, rc %d\n", rc);
return rc;
}
wil->txrx_ops.configure_interrupt_moderation(wil);
/* Enable OFU rdy valid bug fix, to prevent hang in oful34_rx
* while there is back-pressure from Host during RX
*/
if (wil->hw_version >= HW_VER_TALYN_MB)
wil_s(wil, RGF_DMA_MISC_CTL,
BIT_OFUL34_RDY_VALID_BUG_FIX_EN);
rc = wil_restore_vifs(wil);
if (rc) {
wil_err(wil, "failed to restore vifs, rc %d\n", rc);
return rc;
}
wil_collect_fw_info(wil);
if (wil->ps_profile != WMI_PS_PROFILE_TYPE_DEFAULT)
wil_ps_update(wil, wil->ps_profile);
if (wil->platform_ops.notify) {
rc = wil->platform_ops.notify(wil->platform_handle,
WIL_PLATFORM_EVT_FW_RDY);
if (rc) {
wil_err(wil, "FW_RDY notify failed, rc %d\n",
rc);
rc = 0;
}
}
}
return rc;
out:
up_write(&wil->mem_lock);
clear_bit(wil_status_resetting, wil->status);
return rc;
}
void wil_fw_error_recovery(struct wil6210_priv *wil)
{
wil_dbg_misc(wil, "starting fw error recovery\n");
if (test_bit(wil_status_resetting, wil->status)) {
wil_info(wil, "Reset already in progress\n");
return;
}
wil->recovery_state = fw_recovery_pending;
schedule_work(&wil->fw_error_worker);
}
int __wil_up(struct wil6210_priv *wil)
{
struct net_device *ndev = wil->main_ndev;
struct wireless_dev *wdev = ndev->ieee80211_ptr;
int rc;
WARN_ON(!mutex_is_locked(&wil->mutex));
rc = wil_reset(wil, true);
if (rc)
return rc;
/* Rx RING. After MAC and beacon */
if (rx_ring_order == 0)
rx_ring_order = wil->hw_version < HW_VER_TALYN_MB ?
WIL_RX_RING_SIZE_ORDER_DEFAULT :
WIL_RX_RING_SIZE_ORDER_TALYN_DEFAULT;
rc = wil->txrx_ops.rx_init(wil, rx_ring_order);
if (rc)
return rc;
rc = wil->txrx_ops.tx_init(wil);
if (rc)
return rc;
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
wil_dbg_misc(wil, "type: STATION\n");
ndev->type = ARPHRD_ETHER;
break;
case NL80211_IFTYPE_AP:
wil_dbg_misc(wil, "type: AP\n");
ndev->type = ARPHRD_ETHER;
break;
case NL80211_IFTYPE_P2P_CLIENT:
wil_dbg_misc(wil, "type: P2P_CLIENT\n");
ndev->type = ARPHRD_ETHER;
break;
case NL80211_IFTYPE_P2P_GO:
wil_dbg_misc(wil, "type: P2P_GO\n");
ndev->type = ARPHRD_ETHER;
break;
case NL80211_IFTYPE_MONITOR:
wil_dbg_misc(wil, "type: Monitor\n");
ndev->type = ARPHRD_IEEE80211_RADIOTAP;
/* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */
break;
default:
return -EOPNOTSUPP;
}
/* MAC address - pre-requisite for other commands */
wmi_set_mac_address(wil, ndev->dev_addr);
wil_dbg_misc(wil, "NAPI enable\n");
napi_enable(&wil->napi_rx);
napi_enable(&wil->napi_tx);
set_bit(wil_status_napi_en, wil->status);
wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
return 0;
}
int wil_up(struct wil6210_priv *wil)
{
int rc;
wil_dbg_misc(wil, "up\n");
mutex_lock(&wil->mutex);
rc = __wil_up(wil);
mutex_unlock(&wil->mutex);
return rc;
}
int __wil_down(struct wil6210_priv *wil)
{
int rc;
WARN_ON(!mutex_is_locked(&wil->mutex));
set_bit(wil_status_resetting, wil->status);
wil6210_bus_request(wil, 0);
wil_disable_irq(wil);
if (test_and_clear_bit(wil_status_napi_en, wil->status)) {
napi_disable(&wil->napi_rx);
napi_disable(&wil->napi_tx);
wil_dbg_misc(wil, "NAPI disable\n");
}
wil_enable_irq(wil);
mutex_lock(&wil->vif_mutex);
wil_p2p_stop_radio_operations(wil);
wil_abort_scan_all_vifs(wil, false);
mutex_unlock(&wil->vif_mutex);
rc = wil_reset(wil, false);
return rc;
}
int wil_down(struct wil6210_priv *wil)
{
int rc;
wil_dbg_misc(wil, "down\n");
wil_set_recovery_state(wil, fw_recovery_idle);
mutex_lock(&wil->mutex);
rc = __wil_down(wil);
mutex_unlock(&wil->mutex);
return rc;
}
int wil_find_cid(struct wil6210_priv *wil, u8 mid, const u8 *mac)
{
int i;
int rc = -ENOENT;
for (i = 0; i < wil->max_assoc_sta; i++) {
if (wil->sta[i].mid == mid &&
wil->sta[i].status != wil_sta_unused &&
ether_addr_equal(wil->sta[i].addr, mac)) {
rc = i;
break;
}
}
return rc;
}
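/* Illustrative sketch, not part of the driver: wil_find_cid() returns the
* connection index or -ENOENT, so callers must validate it before indexing
* the sta[] array. example_find_sta() is hypothetical.
*/
static struct wil_sta_info *example_find_sta(struct wil6210_priv *wil,
u8 mid, const u8 *mac)
{
int cid = wil_find_cid(wil, mid, mac);
return cid >= 0 ? &wil->sta[cid] : NULL;
}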
void wil_halp_vote(struct wil6210_priv *wil)
{
unsigned long rc;
unsigned long to_jiffies = msecs_to_jiffies(WAIT_FOR_HALP_VOTE_MS);
if (wil->hw_version >= HW_VER_TALYN_MB)
return;
mutex_lock(&wil->halp.lock);
wil_dbg_irq(wil, "halp_vote: start, HALP ref_cnt (%d)\n",
wil->halp.ref_cnt);
if (++wil->halp.ref_cnt == 1) {
reinit_completion(&wil->halp.comp);
/* mark to IRQ context to handle HALP ICR */
wil->halp.handle_icr = true;
wil6210_set_halp(wil);
rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies);
if (!rc) {
wil_err(wil, "HALP vote timed out\n");
/* Mask HALP as done in case the interrupt is raised */
wil->halp.handle_icr = false;
wil6210_mask_halp(wil);
} else {
wil_dbg_irq(wil,
"halp_vote: HALP vote completed after %d ms\n",
jiffies_to_msecs(to_jiffies - rc));
}
}
wil_dbg_irq(wil, "halp_vote: end, HALP ref_cnt (%d)\n",
wil->halp.ref_cnt);
mutex_unlock(&wil->halp.lock);
}
void wil_halp_unvote(struct wil6210_priv *wil)
{
if (wil->hw_version >= HW_VER_TALYN_MB)
return;
WARN_ON(wil->halp.ref_cnt == 0);
mutex_lock(&wil->halp.lock);
wil_dbg_irq(wil, "halp_unvote: start, HALP ref_cnt (%d)\n",
wil->halp.ref_cnt);
if (--wil->halp.ref_cnt == 0) {
wil6210_clear_halp(wil);
wil_dbg_irq(wil, "HALP unvote\n");
}
wil_dbg_irq(wil, "halp_unvote:end, HALP ref_cnt (%d)\n",
wil->halp.ref_cnt);
mutex_unlock(&wil->halp.lock);
}
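/* Illustrative sketch, not part of the driver: HALP votes are refcounted,
* so every wil_halp_vote() must be balanced by a wil_halp_unvote(); the
* first vote keeps the device out of low power and the last unvote releases
* it. example_halp_section() is hypothetical.
*/
static void example_halp_section(struct wil6210_priv *wil)
{
wil_halp_vote(wil);
/* ... access registers that require the device to be fully awake ... */
wil_halp_unvote(wil);
}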
void wil_init_txrx_ops(struct wil6210_priv *wil)
{
if (wil->use_enhanced_dma_hw)
wil_init_txrx_ops_edma(wil);
else
wil_init_txrx_ops_legacy_dma(wil);
}
|
linux-master
|
drivers/net/wireless/ath/wil6210/main.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include "wil6210.h"
#include <linux/devcoredump.h>
static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
u32 *out_dump_size, u32 *out_host_min)
{
int i;
const struct fw_map *map;
u32 host_min, host_max, tmp_max;
if (!out_dump_size)
return -EINVAL;
/* calculate the total size of the unpacked crash dump */
BUILD_BUG_ON(ARRAY_SIZE(fw_mapping) == 0);
map = &fw_mapping[0];
host_min = map->host;
host_max = map->host + (map->to - map->from);
for (i = 1; i < ARRAY_SIZE(fw_mapping); i++) {
map = &fw_mapping[i];
if (!map->crash_dump)
continue;
if (map->host < host_min)
host_min = map->host;
tmp_max = map->host + (map->to - map->from);
if (tmp_max > host_max)
host_max = tmp_max;
}
*out_dump_size = host_max - host_min;
if (out_host_min)
*out_host_min = host_min;
return 0;
}
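/* Worked example: with two crash-dump regions mapped to host addresses
* [0x1000, 0x1100) and [0x2000, 0x2400), host_min = 0x1000 and
* host_max = 0x2400, so the unpacked dump size is 0x1400 bytes; the gap
* between the regions stays zero-filled in the vzalloc'ed buffer used by
* wil_fw_core_dump().
*/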
int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
{
int i;
const struct fw_map *map;
void *data;
u32 host_min, dump_size, offset, len;
if (wil_fw_get_crash_dump_bounds(wil, &dump_size, &host_min)) {
wil_err(wil, "fail to obtain crash dump size\n");
return -EINVAL;
}
if (dump_size > size) {
wil_err(wil, "not enough space for dump. Need %d have %d\n",
dump_size, size);
return -EINVAL;
}
down_write(&wil->mem_lock);
if (test_bit(wil_status_suspending, wil->status) ||
test_bit(wil_status_suspended, wil->status)) {
wil_err(wil,
"suspend/resume in progress. cannot copy crash dump\n");
up_write(&wil->mem_lock);
return -EBUSY;
}
/* copy to crash dump area */
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
map = &fw_mapping[i];
if (!map->crash_dump)
continue;
data = (void * __force)wil->csr + HOSTADDR(map->host);
len = map->to - map->from;
offset = map->host - host_min;
wil_dbg_misc(wil,
"fw_copy_crash_dump: - dump %s, size %d, offset %d\n",
fw_mapping[i].name, len, offset);
wil_memcpy_fromio_32((void * __force)(dest + offset),
(const void __iomem * __force)data, len);
}
up_write(&wil->mem_lock);
return 0;
}
void wil_fw_core_dump(struct wil6210_priv *wil)
{
void *fw_dump_data;
u32 fw_dump_size;
if (wil_fw_get_crash_dump_bounds(wil, &fw_dump_size, NULL)) {
wil_err(wil, "fail to get fw dump size\n");
return;
}
fw_dump_data = vzalloc(fw_dump_size);
if (!fw_dump_data)
return;
if (wil_fw_copy_crash_dump(wil, fw_dump_data, fw_dump_size)) {
vfree(fw_dump_data);
return;
}
/* fw_dump_data will be freed in the device coredump release function
* after 5 min
*/
dev_coredumpv(wil_to_dev(wil), fw_dump_data, fw_dump_size, GFP_KERNEL);
wil_info(wil, "fw core dumped, size %d bytes\n", fw_dump_size);
}
|
linux-master
|
drivers/net/wireless/ath/wil6210/wil_crash_dump.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
*/
#include <linux/device.h>
#include "wil_platform.h"
int __init wil_platform_modinit(void)
{
return 0;
}
void wil_platform_modexit(void)
{
}
/* wil_platform_init() - wil6210 platform module init
*
* The function must be called before all other functions in this module.
* It returns a handle which is used with the rest of the API
*
*/
void *wil_platform_init(struct device *dev, struct wil_platform_ops *ops,
const struct wil_platform_rops *rops, void *wil_handle)
{
void *handle = ops; /* to return some non-NULL for 'void' impl. */
if (!ops) {
dev_err(dev,
"Invalid parameter. Cannot init platform module\n");
return NULL;
}
/* platform specific init functions should be called here */
return handle;
}
|
linux-master
|
drivers/net/wireless/ath/wil6210/wil_platform.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/etherdevice.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ipv6.h>
#include <linux/prefetch.h>
#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
#include "trace.h"
#include "txrx_edma.h"
bool rx_align_2;
module_param(rx_align_2, bool, 0444);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
bool rx_large_buf;
module_param(rx_large_buf, bool, 0444);
MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");
/* Drop Tx packets in case Tx ring is full */
bool drop_if_ring_full;
static inline uint wil_rx_snaplen(void)
{
return rx_align_2 ? 6 : 0;
}
/* wil_ring_wmark_low - low watermark for available descriptor space */
static inline int wil_ring_wmark_low(struct wil_ring *ring)
{
return ring->size / 8;
}
/* wil_ring_wmark_high - high watermark for available descriptor space */
static inline int wil_ring_wmark_high(struct wil_ring *ring)
{
return ring->size / 4;
}
/* returns true if num avail descriptors is lower than wmark_low */
static inline int wil_ring_avail_low(struct wil_ring *ring)
{
return wil_ring_avail_tx(ring) < wil_ring_wmark_low(ring);
}
/* returns true if num avail descriptors is higher than wmark_high */
static inline int wil_ring_avail_high(struct wil_ring *ring)
{
return wil_ring_avail_tx(ring) > wil_ring_wmark_high(ring);
}
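/* Worked example (editor's illustration): for a ring with size 1024,
 * wil_ring_wmark_low() returns 128 and wil_ring_wmark_high() returns 256.
 * Net queues are stopped when fewer than 128 descriptors remain available
 * and are only woken again once more than 256 are free; the 2x gap between
 * the watermarks provides hysteresis so the queues do not flap around a
 * single threshold.
 */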
/* returns true when all tx vrings are empty */
bool wil_is_tx_idle(struct wil6210_priv *wil)
{
int i;
unsigned long data_comp_to;
int min_ring_id = wil_get_min_tx_ring_id(wil);
for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
struct wil_ring *vring = &wil->ring_tx[i];
int vring_index = vring - wil->ring_tx;
struct wil_ring_tx_data *txdata =
&wil->ring_tx_data[vring_index];
spin_lock(&txdata->lock);
if (!vring->va || !txdata->enabled) {
spin_unlock(&txdata->lock);
continue;
}
data_comp_to = jiffies + msecs_to_jiffies(
WIL_DATA_COMPLETION_TO_MS);
if (test_bit(wil_status_napi_en, wil->status)) {
while (!wil_ring_is_empty(vring)) {
if (time_after(jiffies, data_comp_to)) {
wil_dbg_pm(wil,
"TO waiting for idle tx\n");
spin_unlock(&txdata->lock);
return false;
}
wil_dbg_ratelimited(wil,
"tx vring is not empty -> NAPI\n");
spin_unlock(&txdata->lock);
napi_synchronize(&wil->napi_tx);
msleep(20);
spin_lock(&txdata->lock);
if (!vring->va || !txdata->enabled)
break;
}
}
spin_unlock(&txdata->lock);
}
return true;
}
static int wil_vring_alloc(struct wil6210_priv *wil, struct wil_ring *vring)
{
struct device *dev = wil_to_dev(wil);
size_t sz = vring->size * sizeof(vring->va[0]);
uint i;
wil_dbg_misc(wil, "vring_alloc:\n");
BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
vring->swhead = 0;
vring->swtail = 0;
vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
if (!vring->ctx) {
vring->va = NULL;
return -ENOMEM;
}
	/* vring->va should be aligned on its size rounded up to power of 2
	 * This is guaranteed by dma_alloc_coherent.
	 *
	 * HW has a limitation that all vring addresses must share the same
	 * upper 16 bits of the 48-bit address. To work around that, if we
	 * are using more than 32 bit addresses, switch to a 32 bit
	 * allocation before allocating vring memory.
	 *
	 * There's no check for the return value of dma_set_mask_and_coherent,
	 * since we assume if we were able to set the mask during
	 * initialization in this system it will not fail if we set it again
	 */
if (wil->dma_addr_size > 32)
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
if (!vring->va) {
kfree(vring->ctx);
vring->ctx = NULL;
return -ENOMEM;
}
if (wil->dma_addr_size > 32)
dma_set_mask_and_coherent(dev,
DMA_BIT_MASK(wil->dma_addr_size));
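	/* Editor's note, illustrating the workaround above: with
	 * dma_addr_size == 48, the mask is temporarily dropped to 32 bits so
	 * that dma_alloc_coherent() returns a ring below 4 GB. All rings
	 * allocated this way share the same upper 16 bits of the 48-bit
	 * address (all zero), which satisfies the HW limitation.
	 */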
	/* initially, all descriptors are SW owned
	 * For Tx and Rx, the ownership bit is at the same location, thus
	 * we can use either
	 */
for (i = 0; i < vring->size; i++) {
volatile struct vring_tx_desc *_d =
&vring->va[i].tx.legacy;
_d->dma.status = TX_DMA_STATUS_DU;
}
wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
vring->va, &vring->pa, vring->ctx);
return 0;
}
static void wil_txdesc_unmap(struct device *dev, union wil_tx_desc *desc,
struct wil_ctx *ctx)
{
struct vring_tx_desc *d = &desc->legacy;
dma_addr_t pa = wil_desc_addr(&d->dma.addr);
u16 dmalen = le16_to_cpu(d->dma.length);
switch (ctx->mapped_as) {
case wil_mapped_as_single:
dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
break;
case wil_mapped_as_page:
dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
break;
default:
break;
}
}
static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring)
{
struct device *dev = wil_to_dev(wil);
size_t sz = vring->size * sizeof(vring->va[0]);
lockdep_assert_held(&wil->mutex);
if (!vring->is_rx) {
int vring_index = vring - wil->ring_tx;
wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
vring_index, vring->size, vring->va,
&vring->pa, vring->ctx);
} else {
wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
vring->size, vring->va,
&vring->pa, vring->ctx);
}
while (!wil_ring_is_empty(vring)) {
dma_addr_t pa;
u16 dmalen;
struct wil_ctx *ctx;
if (!vring->is_rx) {
			struct vring_tx_desc dd, *d = &dd;
volatile struct vring_tx_desc *_d =
&vring->va[vring->swtail].tx.legacy;
ctx = &vring->ctx[vring->swtail];
if (!ctx) {
wil_dbg_txrx(wil,
"ctx(%d) was already completed\n",
vring->swtail);
vring->swtail = wil_ring_next_tail(vring);
continue;
}
*d = *_d;
wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
if (ctx->skb)
dev_kfree_skb_any(ctx->skb);
vring->swtail = wil_ring_next_tail(vring);
} else { /* rx */
			struct vring_rx_desc dd, *d = &dd;
volatile struct vring_rx_desc *_d =
&vring->va[vring->swhead].rx.legacy;
ctx = &vring->ctx[vring->swhead];
*d = *_d;
pa = wil_desc_addr(&d->dma.addr);
dmalen = le16_to_cpu(d->dma.length);
dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
kfree_skb(ctx->skb);
wil_ring_advance_head(vring, 1);
}
}
dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
kfree(vring->ctx);
vring->pa = 0;
vring->va = NULL;
vring->ctx = NULL;
}
/* Allocate one skb for Rx VRING
*
* Safe to call from IRQ
*/
static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct wil_ring *vring,
u32 i, int headroom)
{
struct device *dev = wil_to_dev(wil);
unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
	struct vring_rx_desc dd, *d = &dd;
volatile struct vring_rx_desc *_d = &vring->va[i].rx.legacy;
dma_addr_t pa;
struct sk_buff *skb = dev_alloc_skb(sz + headroom);
if (unlikely(!skb))
return -ENOMEM;
skb_reserve(skb, headroom);
skb_put(skb, sz);
	/* Make sure that the network stack calculates checksum for packets
	 * which failed the HW checksum calculation
	 */
skb->ip_summed = CHECKSUM_NONE;
pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, pa))) {
kfree_skb(skb);
return -ENOMEM;
}
d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
wil_desc_addr_set(&d->dma.addr, pa);
/* ip_length don't care */
/* b11 don't care */
/* error don't care */
d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
d->dma.length = cpu_to_le16(sz);
*_d = *d;
vring->ctx[i].skb = skb;
return 0;
}
/* Adds radiotap header
*
* Any error indicated as "Bad FCS"
*
* Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
* - Rx descriptor: 32 bytes
* - Phy info
*/
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
struct sk_buff *skb)
{
struct wil6210_rtap {
struct ieee80211_radiotap_header rthdr;
/* fields should be in the order of bits in rthdr.it_present */
/* flags */
u8 flags;
/* channel */
__le16 chnl_freq __aligned(2);
__le16 chnl_flags;
/* MCS */
u8 mcs_present;
u8 mcs_flags;
u8 mcs_index;
} __packed;
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
struct wil6210_rtap *rtap;
int rtap_len = sizeof(struct wil6210_rtap);
struct ieee80211_channel *ch = wil->monitor_chandef.chan;
if (skb_headroom(skb) < rtap_len &&
pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
return;
}
rtap = skb_push(skb, rtap_len);
memset(rtap, 0, rtap_len);
rtap->rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
rtap->rthdr.it_len = cpu_to_le16(rtap_len);
rtap->rthdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
(1 << IEEE80211_RADIOTAP_CHANNEL) |
(1 << IEEE80211_RADIOTAP_MCS));
if (d->dma.status & RX_DMA_STATUS_ERROR)
rtap->flags |= IEEE80211_RADIOTAP_F_BADFCS;
rtap->chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
rtap->chnl_flags = cpu_to_le16(0);
rtap->mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
rtap->mcs_flags = 0;
rtap->mcs_index = wil_rxdesc_mcs(d);
}
static bool wil_is_rx_idle(struct wil6210_priv *wil)
{
struct vring_rx_desc *_d;
struct wil_ring *ring = &wil->ring_rx;
_d = (struct vring_rx_desc *)&ring->va[ring->swhead].rx.legacy;
if (_d->dma.status & RX_DMA_STATUS_DU)
return false;
return true;
}
static int wil_rx_get_cid_by_skb(struct wil6210_priv *wil, struct sk_buff *skb)
{
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
int mid = wil_rxdesc_mid(d);
struct wil6210_vif *vif = wil->vifs[mid];
	/* cid from the DMA descriptor is limited to 3 bits.
	 * In case of cid >= 8, the value would be cid modulo 8 and we need to
	 * find the real cid by locating the transmitter (ta) in the sta array
	 */
int cid = wil_rxdesc_cid(d);
unsigned int snaplen = wil_rx_snaplen();
struct ieee80211_hdr_3addr *hdr;
int i;
unsigned char *ta;
u8 ftype;
/* in monitor mode there are no connections */
if (vif->wdev.iftype == NL80211_IFTYPE_MONITOR)
return cid;
ftype = wil_rxdesc_ftype(d) << 2;
if (likely(ftype == IEEE80211_FTYPE_DATA)) {
if (unlikely(skb->len < ETH_HLEN + snaplen)) {
wil_err_ratelimited(wil,
"Short data frame, len = %d\n",
skb->len);
return -ENOENT;
}
ta = wil_skb_get_sa(skb);
} else {
if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
wil_err_ratelimited(wil, "Short frame, len = %d\n",
skb->len);
return -ENOENT;
}
hdr = (void *)skb->data;
ta = hdr->addr2;
}
if (wil->max_assoc_sta <= WIL6210_RX_DESC_MAX_CID)
return cid;
/* assuming no concurrency between AP interfaces and STA interfaces.
* multista is used only in P2P_GO or AP mode. In other modes return
* cid from the rx descriptor
*/
if (vif->wdev.iftype != NL80211_IFTYPE_P2P_GO &&
vif->wdev.iftype != NL80211_IFTYPE_AP)
return cid;
	/* For Rx packets the cid from the rx descriptor is limited to 3 bits
	 * (0..7); to find the real cid, compare the transmitter address with
	 * the stored stations' MAC addresses in the driver's sta array
	 */
for (i = cid; i < wil->max_assoc_sta; i += WIL6210_RX_DESC_MAX_CID) {
if (wil->sta[i].status != wil_sta_unused &&
ether_addr_equal(wil->sta[i].addr, ta)) {
cid = i;
break;
}
}
if (i >= wil->max_assoc_sta) {
wil_err_ratelimited(wil, "Could not find cid for frame with transmit addr = %pM, iftype = %d, frametype = %d, len = %d\n",
ta, vif->wdev.iftype, ftype, skb->len);
cid = -ENOENT;
}
return cid;
}
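/* Worked example (editor's illustration): WIL6210_RX_DESC_MAX_CID is 8
 * (the descriptor carries 3 cid bits). With max_assoc_sta == 16 and a
 * descriptor cid of 3, the loop above probes sta[3] and sta[11] and picks
 * the entry whose stored MAC address equals the transmitter address (ta).
 */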
/* reap 1 frame from @swhead
*
* Rx descriptor copied to skb->cb
*
* Safe to call from IRQ
*/
static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
struct wil_ring *vring)
{
struct device *dev = wil_to_dev(wil);
struct wil6210_vif *vif;
struct net_device *ndev;
volatile struct vring_rx_desc *_d;
struct vring_rx_desc *d;
struct sk_buff *skb;
dma_addr_t pa;
unsigned int snaplen = wil_rx_snaplen();
unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen;
u16 dmalen;
u8 ftype;
int cid, mid;
int i;
struct wil_net_stats *stats;
BUILD_BUG_ON(sizeof(struct skb_rx_info) > sizeof(skb->cb));
again:
if (unlikely(wil_ring_is_empty(vring)))
return NULL;
i = (int)vring->swhead;
_d = &vring->va[i].rx.legacy;
if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
		/* it is not an error, we just reached the end of the Rx done area */
return NULL;
}
skb = vring->ctx[i].skb;
vring->ctx[i].skb = NULL;
wil_ring_advance_head(vring, 1);
if (!skb) {
wil_err(wil, "No Rx skb at [%d]\n", i);
goto again;
}
d = wil_skb_rxdesc(skb);
*d = *_d;
pa = wil_desc_addr(&d->dma.addr);
dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
dmalen = le16_to_cpu(d->dma.length);
trace_wil6210_rx(i, d);
wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
mid = wil_rxdesc_mid(d);
vif = wil->vifs[mid];
if (unlikely(!vif)) {
wil_dbg_txrx(wil, "skipped RX descriptor with invalid mid %d",
mid);
kfree_skb(skb);
goto again;
}
ndev = vif_to_ndev(vif);
if (unlikely(dmalen > sz)) {
wil_err_ratelimited(wil, "Rx size too large: %d bytes!\n",
dmalen);
kfree_skb(skb);
goto again;
}
skb_trim(skb, dmalen);
prefetch(skb->data);
wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb_headlen(skb), false);
cid = wil_rx_get_cid_by_skb(wil, skb);
if (cid == -ENOENT) {
kfree_skb(skb);
goto again;
}
wil_skb_set_cid(skb, (u8)cid);
stats = &wil->sta[cid].stats;
stats->last_mcs_rx = wil_rxdesc_mcs(d);
if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
stats->rx_per_mcs[stats->last_mcs_rx]++;
/* use radiotap header only if required */
if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
wil_rx_add_radiotap_header(wil, skb);
/* no extra checks if in sniffer mode */
if (ndev->type != ARPHRD_ETHER)
return skb;
	/* Non-data frames may be delivered through the Rx DMA channel (ex: BAR)
	 * The driver should recognize them by the frame type found in the Rx
	 * descriptor. If the type is not data, it is an 802.11 frame as is
	 */
ftype = wil_rxdesc_ftype(d) << 2;
if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
u8 fc1 = wil_rxdesc_fc1(d);
int tid = wil_rxdesc_tid(d);
u16 seq = wil_rxdesc_seq(d);
wil_dbg_txrx(wil,
"Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
fc1, mid, cid, tid, seq);
stats->rx_non_data_frame++;
if (wil_is_back_req(fc1)) {
wil_dbg_txrx(wil,
"BAR: MID %d CID %d TID %d Seq 0x%03x\n",
mid, cid, tid, seq);
wil_rx_bar(wil, vif, cid, tid, seq);
} else {
/* print again all info. One can enable only this
* without overhead for printing every Rx frame
*/
wil_dbg_txrx(wil,
"Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
fc1, mid, cid, tid, seq);
wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb_headlen(skb), false);
}
kfree_skb(skb);
goto again;
}
	/* L4 IDENT is on when the HW calculated the checksum; check the status
	 * and, in case of error, leave the checksum unverified -
	 * higher stack layers will handle retransmission (if required)
	 */
if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
/* L4 protocol identified, csum calculated */
if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* If HW reports a bad checksum, let the IP stack re-check it
		 * For example, the HW doesn't understand the Microsoft IP stack,
		 * which mis-calculates the TCP checksum - if it should be 0x0,
		 * it writes 0xffff in violation of RFC 1624
		 */
else
stats->rx_csum_err++;
}
if (snaplen) {
/* Packet layout
* +-------+-------+---------+------------+------+
* | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
* +-------+-------+---------+------------+------+
* Need to remove SNAP, shifting SA and DA forward
*/
memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
skb_pull(skb, snaplen);
}
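	/* Editor's note, illustrating the memmove above: with snaplen == 6,
	 * the 12 address bytes (2 * ETH_ALEN) are copied forward by 6,
	 * overwriting the SNAP field, and skb_pull() then drops the 6
	 * now-stale leading bytes, leaving a standard Ethernet header.
	 */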
return skb;
}
/* allocate and fill up to @count buffers in rx ring
 * buffers posted at @swtail
 * Note: we have a single RX queue for servicing all VIFs, but we
 * allocate skbs with headroom according to the main interface only. This
 * means it will not work with a monitor interface together with other VIFs.
 * Currently we only support a monitor interface on its own without other
 * VIFs, and we will need to fix this code once we add support.
 */
static int wil_rx_refill(struct wil6210_priv *wil, int count)
{
struct net_device *ndev = wil->main_ndev;
struct wil_ring *v = &wil->ring_rx;
u32 next_tail;
int rc = 0;
int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
WIL6210_RTAP_SIZE : 0;
for (; next_tail = wil_ring_next_tail(v),
(next_tail != v->swhead) && (count-- > 0);
v->swtail = next_tail) {
rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
if (unlikely(rc)) {
wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n",
rc, v->swtail);
break;
}
}
/* make sure all writes to descriptors (shared memory) are done before
* committing them to HW
*/
wmb();
wil_w(wil, v->hwtail, v->swtail);
return rc;
}
/**
* reverse_memcmp - Compare two areas of memory, in reverse order
* @cs: One area of memory
* @ct: Another area of memory
* @count: The size of the area.
*
* Cut'n'paste from original memcmp (see lib/string.c)
* with minimal modifications
*/
int reverse_memcmp(const void *cs, const void *ct, size_t count)
{
const unsigned char *su1, *su2;
int res = 0;
for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
--su1, --su2, count--) {
res = *su1 - *su2;
if (res)
break;
}
return res;
}
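/* Usage sketch (editor's illustration, hypothetical values): the PN in the
 * Rx descriptor is laid out least-significant byte first, so comparing in
 * reverse order compares the numeric PN values:
 *
 *	u8 last[6] = { 0xff, 0x01, 0, 0, 0, 0 };	// PN 0x1ff
 *	u8 cur[6]  = { 0x00, 0x02, 0, 0, 0, 0 };	// PN 0x200
 *	// reverse_memcmp(cur, last, 6) > 0: cur is newer, frame accepted
 *
 * wil_rx_crypto_check() below treats a result <= 0 as a replayed frame.
 */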
static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
{
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
int cid = wil_skb_get_cid(skb);
int tid = wil_rxdesc_tid(d);
int key_id = wil_rxdesc_key_id(d);
int mc = wil_rxdesc_mcast(d);
struct wil_sta_info *s = &wil->sta[cid];
struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
&s->tid_crypto_rx[tid];
struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
const u8 *pn = (u8 *)&d->mac.pn;
if (!cc->key_set) {
wil_err_ratelimited(wil,
"Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
cid, tid, mc, key_id);
return -EINVAL;
}
if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
wil_err_ratelimited(wil,
"Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
cid, tid, mc, key_id, pn, cc->pn);
return -EINVAL;
}
memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);
return 0;
}
static int wil_rx_error_check(struct wil6210_priv *wil, struct sk_buff *skb,
struct wil_net_stats *stats)
{
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
if ((d->dma.status & RX_DMA_STATUS_ERROR) &&
(d->dma.error & RX_DMA_ERROR_MIC)) {
stats->rx_mic_error++;
wil_dbg_txrx(wil, "MIC error, dropping packet\n");
return -EFAULT;
}
return 0;
}
static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid,
int *security)
{
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
*cid = wil_skb_get_cid(skb);
*security = wil_rxdesc_security(d);
}
/*
 * Check if skb is a PTK EAPOL key message
 *
 * returns a pointer to the start of the EAPOL key structure, NULL
 * if the frame is not a PTK EAPOL key
 */
static struct wil_eapol_key *wil_is_ptk_eapol_key(struct wil6210_priv *wil,
struct sk_buff *skb)
{
u8 *buf;
const struct wil_1x_hdr *hdr;
struct wil_eapol_key *key;
u16 key_info;
int len = skb->len;
if (!skb_mac_header_was_set(skb)) {
wil_err(wil, "mac header was not set\n");
return NULL;
}
len -= skb_mac_offset(skb);
if (len < sizeof(struct ethhdr) + sizeof(struct wil_1x_hdr) +
sizeof(struct wil_eapol_key))
return NULL;
buf = skb_mac_header(skb) + sizeof(struct ethhdr);
hdr = (const struct wil_1x_hdr *)buf;
if (hdr->type != WIL_1X_TYPE_EAPOL_KEY)
return NULL;
key = (struct wil_eapol_key *)(buf + sizeof(struct wil_1x_hdr));
if (key->type != WIL_EAPOL_KEY_TYPE_WPA &&
key->type != WIL_EAPOL_KEY_TYPE_RSN)
return NULL;
key_info = be16_to_cpu(key->key_info);
if (!(key_info & WIL_KEY_INFO_KEY_TYPE)) /* check if pairwise */
return NULL;
return key;
}
static bool wil_skb_is_eap_3(struct wil6210_priv *wil, struct sk_buff *skb)
{
struct wil_eapol_key *key;
u16 key_info;
key = wil_is_ptk_eapol_key(wil, skb);
if (!key)
return false;
key_info = be16_to_cpu(key->key_info);
if (key_info & (WIL_KEY_INFO_MIC |
WIL_KEY_INFO_ENCR_KEY_DATA)) {
/* 3/4 of 4-Way Handshake */
wil_dbg_misc(wil, "EAPOL key message 3\n");
return true;
}
/* 1/4 of 4-Way Handshake */
wil_dbg_misc(wil, "EAPOL key message 1\n");
return false;
}
static bool wil_skb_is_eap_4(struct wil6210_priv *wil, struct sk_buff *skb)
{
struct wil_eapol_key *key;
u32 *nonce, i;
key = wil_is_ptk_eapol_key(wil, skb);
if (!key)
return false;
nonce = (u32 *)key->key_nonce;
for (i = 0; i < WIL_EAP_NONCE_LEN / sizeof(u32); i++, nonce++) {
if (*nonce != 0) {
/* message 2/4 */
wil_dbg_misc(wil, "EAPOL key message 2\n");
return false;
}
}
wil_dbg_misc(wil, "EAPOL key message 4\n");
return true;
}
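/* Editor's note: messages 2/4 and 4/4 of the 4-way handshake look alike
 * from the supplicant side; the code above tells them apart by the key
 * nonce - message 2 carries the (non-zero) SNonce, while message 4's
 * nonce field is all zero.
 */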
void wil_enable_tx_key_worker(struct work_struct *work)
{
struct wil6210_vif *vif = container_of(work,
struct wil6210_vif, enable_tx_key_worker);
struct wil6210_priv *wil = vif_to_wil(vif);
int rc, cid;
rtnl_lock();
if (vif->ptk_rekey_state != WIL_REKEY_WAIT_M4_SENT) {
wil_dbg_misc(wil, "Invalid rekey state = %d\n",
vif->ptk_rekey_state);
rtnl_unlock();
return;
}
cid = wil_find_cid_by_idx(wil, vif->mid, 0);
if (!wil_cid_valid(wil, cid)) {
wil_err(wil, "Invalid cid = %d\n", cid);
rtnl_unlock();
return;
}
wil_dbg_misc(wil, "Apply PTK key after eapol was sent out\n");
rc = wmi_add_cipher_key(vif, 0, wil->sta[cid].addr, 0, NULL,
WMI_KEY_USE_APPLY_PTK);
vif->ptk_rekey_state = WIL_REKEY_IDLE;
rtnl_unlock();
if (rc)
wil_err(wil, "Apply PTK key failed %d\n", rc);
}
void wil_tx_complete_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wireless_dev *wdev = vif_to_wdev(vif);
bool q = false;
if (wdev->iftype != NL80211_IFTYPE_STATION ||
!test_bit(WMI_FW_CAPABILITY_SPLIT_REKEY, wil->fw_capabilities))
return;
/* check if skb is an EAP message 4/4 */
if (!wil_skb_is_eap_4(wil, skb))
return;
spin_lock_bh(&wil->eap_lock);
switch (vif->ptk_rekey_state) {
case WIL_REKEY_IDLE:
/* ignore idle state, can happen due to M4 retransmission */
break;
case WIL_REKEY_M3_RECEIVED:
vif->ptk_rekey_state = WIL_REKEY_IDLE;
break;
case WIL_REKEY_WAIT_M4_SENT:
q = true;
break;
default:
wil_err(wil, "Unknown rekey state = %d",
vif->ptk_rekey_state);
}
spin_unlock_bh(&wil->eap_lock);
if (q) {
q = queue_work(wil->wmi_wq, &vif->enable_tx_key_worker);
wil_dbg_misc(wil, "queue_work of enable_tx_key_worker -> %d\n",
q);
}
}
static void wil_rx_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wireless_dev *wdev = vif_to_wdev(vif);
if (wdev->iftype != NL80211_IFTYPE_STATION ||
!test_bit(WMI_FW_CAPABILITY_SPLIT_REKEY, wil->fw_capabilities))
return;
	/* check if skb is an EAP message 3/4 */
if (!wil_skb_is_eap_3(wil, skb))
return;
if (vif->ptk_rekey_state == WIL_REKEY_IDLE)
vif->ptk_rekey_state = WIL_REKEY_M3_RECEIVED;
}
/*
* Pass Rx packet to the netif. Update statistics.
* Called in softirq context (NAPI poll).
*/
void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
struct wil_net_stats *stats, bool gro)
{
struct wil6210_vif *vif = ndev_to_vif(ndev);
struct wil6210_priv *wil = ndev_to_wil(ndev);
struct wireless_dev *wdev = vif_to_wdev(vif);
unsigned int len = skb->len;
u8 *sa, *da = wil_skb_get_da(skb);
/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
* is not suitable, need to look at data
*/
int mcast = is_multicast_ether_addr(da);
struct sk_buff *xmit_skb = NULL;
if (wdev->iftype == NL80211_IFTYPE_STATION) {
sa = wil_skb_get_sa(skb);
if (mcast && ether_addr_equal(sa, ndev->dev_addr)) {
/* mcast packet looped back to us */
dev_kfree_skb(skb);
ndev->stats.rx_dropped++;
stats->rx_dropped++;
wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
return;
}
} else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
if (mcast) {
/* send multicast frames both to higher layers in
* local net stack and back to the wireless medium
*/
xmit_skb = skb_copy(skb, GFP_ATOMIC);
} else {
int xmit_cid = wil_find_cid(wil, vif->mid, da);
if (xmit_cid >= 0) {
/* The destination station is associated to
* this AP (in this VLAN), so send the frame
* directly to it and do not pass it to local
* net stack.
*/
xmit_skb = skb;
skb = NULL;
}
}
}
if (xmit_skb) {
/* Send to wireless media and increase priority by 256 to
* keep the received priority instead of reclassifying
* the frame (see cfg80211_classify8021d).
*/
xmit_skb->dev = ndev;
xmit_skb->priority += 256;
xmit_skb->protocol = htons(ETH_P_802_3);
skb_reset_network_header(xmit_skb);
skb_reset_mac_header(xmit_skb);
wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
dev_queue_xmit(xmit_skb);
}
if (skb) { /* deliver to local stack */
skb->protocol = eth_type_trans(skb, ndev);
skb->dev = ndev;
if (skb->protocol == cpu_to_be16(ETH_P_PAE))
wil_rx_handle_eapol(vif, skb);
if (gro)
napi_gro_receive(&wil->napi_rx, skb);
else
netif_rx(skb);
}
ndev->stats.rx_packets++;
stats->rx_packets++;
ndev->stats.rx_bytes += len;
stats->rx_bytes += len;
if (mcast)
ndev->stats.multicast++;
}
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
int cid, security;
struct wil6210_priv *wil = ndev_to_wil(ndev);
struct wil_net_stats *stats;
wil->txrx_ops.get_netif_rx_params(skb, &cid, &security);
stats = &wil->sta[cid].stats;
skb_orphan(skb);
if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) {
wil_dbg_txrx(wil, "Rx drop %d bytes\n", skb->len);
dev_kfree_skb(skb);
ndev->stats.rx_dropped++;
stats->rx_replay++;
stats->rx_dropped++;
return;
}
/* check errors reported by HW and update statistics */
if (unlikely(wil->txrx_ops.rx_error_check(wil, skb, stats))) {
dev_kfree_skb(skb);
return;
}
wil_netif_rx(skb, ndev, cid, stats, true);
}
/* Process all completed skb's from the Rx VRING
 *
 * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
 */
void wil_rx_handle(struct wil6210_priv *wil, int *quota)
{
struct net_device *ndev = wil->main_ndev;
struct wireless_dev *wdev = ndev->ieee80211_ptr;
struct wil_ring *v = &wil->ring_rx;
struct sk_buff *skb;
if (unlikely(!v->va)) {
wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
return;
}
wil_dbg_txrx(wil, "rx_handle\n");
while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
(*quota)--;
/* monitor is currently supported on main interface only */
if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
skb->dev = ndev;
skb_reset_mac_header(skb);
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = htons(ETH_P_802_2);
wil_netif_rx_any(skb, ndev);
} else {
wil_rx_reorder(wil, skb);
}
}
wil_rx_refill(wil, v->size);
}
static void wil_rx_buf_len_init(struct wil6210_priv *wil)
{
wil->rx_buf_len = rx_large_buf ?
WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
if (mtu_max > wil->rx_buf_len) {
/* do not allow RX buffers to be smaller than mtu_max, for
* backward compatibility (mtu_max parameter was also used
* to support receiving large packets)
*/
wil_info(wil, "Override RX buffer to mtu_max(%d)\n", mtu_max);
wil->rx_buf_len = mtu_max;
}
}
static int wil_rx_init(struct wil6210_priv *wil, uint order)
{
struct wil_ring *vring = &wil->ring_rx;
int rc;
wil_dbg_misc(wil, "rx_init\n");
if (vring->va) {
wil_err(wil, "Rx ring already allocated\n");
return -EINVAL;
}
wil_rx_buf_len_init(wil);
vring->size = 1 << order;
vring->is_rx = true;
rc = wil_vring_alloc(wil, vring);
if (rc)
return rc;
rc = wmi_rx_chain_add(wil, vring);
if (rc)
goto err_free;
rc = wil_rx_refill(wil, vring->size);
if (rc)
goto err_free;
return 0;
err_free:
wil_vring_free(wil, vring);
return rc;
}
static void wil_rx_fini(struct wil6210_priv *wil)
{
struct wil_ring *vring = &wil->ring_rx;
wil_dbg_misc(wil, "rx_fini\n");
if (vring->va)
wil_vring_free(wil, vring);
}
static int wil_tx_desc_map(union wil_tx_desc *desc, dma_addr_t pa,
u32 len, int vring_index)
{
struct vring_tx_desc *d = &desc->legacy;
wil_desc_addr_set(&d->dma.addr, pa);
d->dma.ip_length = 0;
	/* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4 */
d->dma.b11 = 0/*14 | BIT(7)*/;
d->dma.error = 0;
d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
d->dma.length = cpu_to_le16((u16)len);
d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
d->mac.d[0] = 0;
d->mac.d[1] = 0;
d->mac.d[2] = 0;
d->mac.ucode_cmd = 0;
/* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
(1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
return 0;
}
void wil_tx_data_init(struct wil_ring_tx_data *txdata)
{
spin_lock_bh(&txdata->lock);
txdata->dot1x_open = false;
txdata->enabled = 0;
txdata->idle = 0;
txdata->last_idle = 0;
txdata->begin = 0;
txdata->agg_wsize = 0;
txdata->agg_timeout = 0;
txdata->agg_amsdu = 0;
txdata->addba_in_progress = false;
txdata->mid = U8_MAX;
spin_unlock_bh(&txdata->lock);
}
static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
int cid, int tid)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
struct wmi_vring_cfg_cmd cmd = {
.action = cpu_to_le32(WMI_VRING_CMD_ADD),
.vring_cfg = {
.tx_sw_ring = {
.max_mpdu_size =
cpu_to_le16(wil_mtu2macbuf(mtu_max)),
.ring_size = cpu_to_le16(size),
},
.ringid = id,
.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
.mac_ctrl = 0,
.to_resolution = 0,
.agg_max_wsize = 0,
.schd_params = {
.priority = cpu_to_le16(0),
.timeslot_us = cpu_to_le16(0xfff),
},
},
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_vring_cfg_done_event cmd;
} __packed reply = {
.cmd = {.status = WMI_FW_STATUS_FAILURE},
};
struct wil_ring *vring = &wil->ring_tx[id];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
if (cid >= WIL6210_RX_DESC_MAX_CID) {
cmd.vring_cfg.cidxtid = CIDXTID_EXTENDED_CID_TID;
cmd.vring_cfg.cid = cid;
cmd.vring_cfg.tid = tid;
} else {
cmd.vring_cfg.cidxtid = mk_cidxtid(cid, tid);
}
wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
lockdep_assert_held(&wil->mutex);
if (vring->va) {
wil_err(wil, "Tx ring [%d] already allocated\n", id);
rc = -EINVAL;
goto out;
}
wil_tx_data_init(txdata);
vring->is_rx = false;
vring->size = size;
rc = wil_vring_alloc(wil, vring);
if (rc)
goto out;
wil->ring2cid_tid[id][0] = cid;
wil->ring2cid_tid[id][1] = tid;
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
if (!vif->privacy)
txdata->dot1x_open = true;
rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
goto out_free;
if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "Tx config failed, status 0x%02x\n",
reply.cmd.status);
rc = -EINVAL;
goto out_free;
}
spin_lock_bh(&txdata->lock);
vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
txdata->mid = vif->mid;
txdata->enabled = 1;
spin_unlock_bh(&txdata->lock);
if (txdata->dot1x_open && (agg_wsize >= 0))
wil_addba_tx_request(wil, id, agg_wsize);
return 0;
out_free:
spin_lock_bh(&txdata->lock);
txdata->dot1x_open = false;
txdata->enabled = 0;
spin_unlock_bh(&txdata->lock);
wil_vring_free(wil, vring);
wil->ring2cid_tid[id][0] = wil->max_assoc_sta;
wil->ring2cid_tid[id][1] = 0;
out:
return rc;
}
static int wil_tx_vring_modify(struct wil6210_vif *vif, int ring_id, int cid,
int tid)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
struct wmi_vring_cfg_cmd cmd = {
.action = cpu_to_le32(WMI_VRING_CMD_MODIFY),
.vring_cfg = {
.tx_sw_ring = {
.max_mpdu_size =
cpu_to_le16(wil_mtu2macbuf(mtu_max)),
.ring_size = 0,
},
.ringid = ring_id,
.cidxtid = mk_cidxtid(cid, tid),
.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
.mac_ctrl = 0,
.to_resolution = 0,
.agg_max_wsize = 0,
.schd_params = {
.priority = cpu_to_le16(0),
.timeslot_us = cpu_to_le16(0xfff),
},
},
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_vring_cfg_done_event cmd;
} __packed reply = {
.cmd = {.status = WMI_FW_STATUS_FAILURE},
};
struct wil_ring *vring = &wil->ring_tx[ring_id];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
wil_dbg_misc(wil, "vring_modify: ring %d cid %d tid %d\n", ring_id,
cid, tid);
lockdep_assert_held(&wil->mutex);
if (!vring->va) {
wil_err(wil, "Tx ring [%d] not allocated\n", ring_id);
return -EINVAL;
}
if (wil->ring2cid_tid[ring_id][0] != cid ||
wil->ring2cid_tid[ring_id][1] != tid) {
wil_err(wil, "ring info does not match cid=%u tid=%u\n",
wil->ring2cid_tid[ring_id][0],
wil->ring2cid_tid[ring_id][1]);
}
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
goto fail;
if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "Tx modify failed, status 0x%02x\n",
reply.cmd.status);
rc = -EINVAL;
goto fail;
}
/* set BA aggregation window size to 0 to force a new BA with the
* new AP
*/
txdata->agg_wsize = 0;
if (txdata->dot1x_open && agg_wsize >= 0)
wil_addba_tx_request(wil, ring_id, agg_wsize);
return 0;
fail:
spin_lock_bh(&txdata->lock);
txdata->dot1x_open = false;
txdata->enabled = 0;
spin_unlock_bh(&txdata->lock);
wil->ring2cid_tid[ring_id][0] = wil->max_assoc_sta;
wil->ring2cid_tid[ring_id][1] = 0;
return rc;
}
int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
struct wmi_bcast_vring_cfg_cmd cmd = {
.action = cpu_to_le32(WMI_VRING_CMD_ADD),
.vring_cfg = {
.tx_sw_ring = {
.max_mpdu_size =
cpu_to_le16(wil_mtu2macbuf(mtu_max)),
.ring_size = cpu_to_le16(size),
},
.ringid = id,
.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
},
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_vring_cfg_done_event cmd;
} __packed reply = {
.cmd = {.status = WMI_FW_STATUS_FAILURE},
};
struct wil_ring *vring = &wil->ring_tx[id];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
lockdep_assert_held(&wil->mutex);
if (vring->va) {
wil_err(wil, "Tx ring [%d] already allocated\n", id);
rc = -EINVAL;
goto out;
}
wil_tx_data_init(txdata);
vring->is_rx = false;
vring->size = size;
rc = wil_vring_alloc(wil, vring);
if (rc)
goto out;
wil->ring2cid_tid[id][0] = wil->max_assoc_sta; /* CID */
wil->ring2cid_tid[id][1] = 0; /* TID */
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
if (!vif->privacy)
txdata->dot1x_open = true;
rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, vif->mid,
&cmd, sizeof(cmd),
WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
goto out_free;
if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "Tx config failed, status 0x%02x\n",
reply.cmd.status);
rc = -EINVAL;
goto out_free;
}
spin_lock_bh(&txdata->lock);
vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
txdata->mid = vif->mid;
txdata->enabled = 1;
spin_unlock_bh(&txdata->lock);
return 0;
out_free:
spin_lock_bh(&txdata->lock);
txdata->enabled = 0;
txdata->dot1x_open = false;
spin_unlock_bh(&txdata->lock);
wil_vring_free(wil, vring);
out:
return rc;
}
static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil,
struct wil6210_vif *vif,
struct sk_buff *skb)
{
int i, cid;
const u8 *da = wil_skb_get_da(skb);
int min_ring_id = wil_get_min_tx_ring_id(wil);
cid = wil_find_cid(wil, vif->mid, da);
if (cid < 0 || cid >= wil->max_assoc_sta)
return NULL;
/* TODO: fix for multiple TID */
for (i = min_ring_id; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
if (!wil->ring_tx_data[i].dot1x_open &&
skb->protocol != cpu_to_be16(ETH_P_PAE))
continue;
if (wil->ring2cid_tid[i][0] == cid) {
struct wil_ring *v = &wil->ring_tx[i];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
da, i);
if (v->va && txdata->enabled) {
return v;
} else {
wil_dbg_txrx(wil,
"find_tx_ucast: vring[%d] not valid\n",
i);
return NULL;
}
}
}
return NULL;
}
static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
struct wil_ring *ring, struct sk_buff *skb);
static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil,
struct wil6210_vif *vif,
struct sk_buff *skb)
{
struct wil_ring *ring;
int i;
u8 cid;
struct wil_ring_tx_data *txdata;
int min_ring_id = wil_get_min_tx_ring_id(wil);
	/* In STA mode, we expect to have only 1 VRING
	 * for the AP we are connected to.
	 * Find the first vring eligible for this skb and use it.
	 */
for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
ring = &wil->ring_tx[i];
txdata = &wil->ring_tx_data[i];
if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
continue;
cid = wil->ring2cid_tid[i][0];
if (cid >= wil->max_assoc_sta) /* skip BCAST */
continue;
if (!wil->ring_tx_data[i].dot1x_open &&
skb->protocol != cpu_to_be16(ETH_P_PAE))
continue;
wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
return ring;
}
wil_dbg_txrx(wil, "Tx while no rings active?\n");
return NULL;
}
/* Use one of 2 strategies:
 *
 * 1. New (real broadcast):
 *    use the dedicated broadcast vring
 * 2. Old (pseudo-DMS):
 *    Find the first vring and return it;
 *    duplicate the skb and send it to the other active vrings;
 *    in all cases override the dest address to the unicast peer's address
 * Use the old strategy when the new one is not supported yet:
 *  - for PBSS
 */
static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
struct wil6210_vif *vif,
struct sk_buff *skb)
{
struct wil_ring *v;
struct wil_ring_tx_data *txdata;
int i = vif->bcast_ring;
if (i < 0)
return NULL;
v = &wil->ring_tx[i];
txdata = &wil->ring_tx_data[i];
if (!v->va || !txdata->enabled)
return NULL;
if (!wil->ring_tx_data[i].dot1x_open &&
skb->protocol != cpu_to_be16(ETH_P_PAE))
return NULL;
return v;
}
/* apply multicast to unicast only for ARP and IP packets
* (see NL80211_CMD_SET_MULTICAST_TO_UNICAST for more info)
*/
static bool wil_check_multicast_to_unicast(struct wil6210_priv *wil,
struct sk_buff *skb)
{
const struct ethhdr *eth = (void *)skb->data;
const struct vlan_ethhdr *ethvlan = (void *)skb->data;
__be16 ethertype;
if (!wil->multicast_to_unicast)
return false;
/* multicast to unicast conversion only for some payload */
ethertype = eth->h_proto;
if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN)
ethertype = ethvlan->h_vlan_encapsulated_proto;
switch (ethertype) {
case htons(ETH_P_ARP):
case htons(ETH_P_IP):
case htons(ETH_P_IPV6):
break;
default:
return false;
}
return true;
}
static void wil_set_da_for_vring(struct wil6210_priv *wil,
struct sk_buff *skb, int vring_index)
{
u8 *da = wil_skb_get_da(skb);
int cid = wil->ring2cid_tid[vring_index][0];
ether_addr_copy(da, wil->sta[cid].addr);
}
static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
struct wil6210_vif *vif,
struct sk_buff *skb)
{
struct wil_ring *v, *v2;
struct sk_buff *skb2;
int i;
u8 cid;
const u8 *src = wil_skb_get_sa(skb);
struct wil_ring_tx_data *txdata, *txdata2;
int min_ring_id = wil_get_min_tx_ring_id(wil);
	/* find the first vring eligible for data */
for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
v = &wil->ring_tx[i];
txdata = &wil->ring_tx_data[i];
if (!v->va || !txdata->enabled || txdata->mid != vif->mid)
continue;
cid = wil->ring2cid_tid[i][0];
if (cid >= wil->max_assoc_sta) /* skip BCAST */
continue;
if (!wil->ring_tx_data[i].dot1x_open &&
skb->protocol != cpu_to_be16(ETH_P_PAE))
continue;
/* don't Tx back to source when re-routing Rx->Tx at the AP */
if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
continue;
goto found;
}
wil_dbg_txrx(wil, "Tx while no vrings active?\n");
return NULL;
found:
wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
wil_set_da_for_vring(wil, skb, i);
/* find other active vrings and duplicate skb for each */
for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
v2 = &wil->ring_tx[i];
txdata2 = &wil->ring_tx_data[i];
if (!v2->va || txdata2->mid != vif->mid)
continue;
cid = wil->ring2cid_tid[i][0];
if (cid >= wil->max_assoc_sta) /* skip BCAST */
continue;
if (!wil->ring_tx_data[i].dot1x_open &&
skb->protocol != cpu_to_be16(ETH_P_PAE))
continue;
if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
continue;
skb2 = skb_copy(skb, GFP_ATOMIC);
if (skb2) {
wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
wil_set_da_for_vring(wil, skb2, i);
wil_tx_ring(wil, vif, v2, skb2);
/* successful call to wil_tx_ring takes skb2 ref */
dev_kfree_skb_any(skb2);
} else {
wil_err(wil, "skb_copy failed\n");
}
}
return v;
}
static inline
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
}
/* Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
* @skb is used to obtain the protocol and headers length.
* @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
* 2 - middle, 3 - last descriptor.
*/
static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
struct sk_buff *skb,
int tso_desc_type, bool is_ipv4,
int tcp_hdr_len, int skb_net_hdr_len)
{
d->dma.b11 = ETH_HLEN; /* MAC header length */
d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
/* L4 header len: TCP header length */
d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
/* Setup TSO: bit and desc type */
d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
(tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);
d->dma.ip_length = skb_net_hdr_len;
/* Enable TCP/UDP checksum */
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
/* Calculate pseudo-header */
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
}
/* Sets the descriptor @d up for csum. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * Returns 0 on success (including when no checksum offload is needed)
 * and -EINVAL when the L3/L4 protocol is not supported.
 *
 * It is very similar to the previous wil_tx_desc_offload_setup_tso. This
 * is "if unrolling" to optimize the critical path.
 */
static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
				     struct sk_buff *skb)
{
int protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
d->dma.b11 = ETH_HLEN; /* MAC header length */
switch (skb->protocol) {
case cpu_to_be16(ETH_P_IP):
protocol = ip_hdr(skb)->protocol;
d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
break;
case cpu_to_be16(ETH_P_IPV6):
protocol = ipv6_hdr(skb)->nexthdr;
break;
default:
return -EINVAL;
}
switch (protocol) {
case IPPROTO_TCP:
d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
/* L4 header len: TCP header length */
d->dma.d0 |=
(tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
break;
case IPPROTO_UDP:
/* L4 header len: UDP header length */
d->dma.d0 |=
(sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
break;
default:
return -EINVAL;
}
d->dma.ip_length = skb_network_header_len(skb);
/* Enable TCP/UDP checksum */
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
/* Calculate pseudo-header */
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
return 0;
}
static inline void wil_tx_last_desc(struct vring_tx_desc *d)
{
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
}
static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
{
d->dma.d0 |= wil_tso_type_lst <<
DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
}
static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
struct wil_ring *vring, struct sk_buff *skb)
{
struct device *dev = wil_to_dev(wil);
/* point to descriptors in shared memory */
volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
*_first_desc = NULL;
/* pointers to shadow descriptors */
struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
*d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
*first_desc = &first_desc_mem;
/* pointer to shadow descriptors' context */
struct wil_ctx *hdr_ctx, *first_ctx = NULL;
int descs_used = 0; /* total number of used descriptors */
	int sg_desc_cnt = 0; /* number of descriptors for current mss */
u32 swhead = vring->swhead;
int used, avail = wil_ring_avail_tx(vring);
int nr_frags = skb_shinfo(skb)->nr_frags;
int min_desc_required = nr_frags + 1;
int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
int f, len, hdrlen, headlen;
int vring_index = vring - wil->ring_tx;
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[vring_index];
uint i = swhead;
dma_addr_t pa;
const skb_frag_t *frag = NULL;
int rem_data = mss;
int lenmss;
int hdr_compensation_need = true;
int desc_tso_type = wil_tso_type_first;
bool is_ipv4;
int tcp_hdr_len;
int skb_net_hdr_len;
int gso_type;
int rc = -EINVAL;
wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len,
vring_index);
if (unlikely(!txdata->enabled))
return -EINVAL;
	/* A typical 4K page holds 3-4 payloads; we assume each fragment
	 * is a full payload, which is how min_desc_required has been
	 * calculated. In reality we might need more or fewer descriptors;
	 * this is the initial check only.
	 */
if (unlikely(avail < min_desc_required)) {
wil_err_ratelimited(wil,
"TSO: Tx ring[%2d] full. No space for %d fragments\n",
vring_index, min_desc_required);
return -ENOMEM;
}
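	/* Editor's note, illustrating the check above: an skb with 3 frags
	 * gives min_desc_required == 4 - a lower bound only. Actual
	 * consumption depends on the mss segmentation below (one header
	 * descriptor plus possibly several descriptors per fragment); a
	 * genuine overflow is caught by the descs_used == avail check in
	 * the loop.
	 */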
/* Header Length = MAC header len + IP header len + TCP header len*/
hdrlen = skb_tcp_all_headers(skb);
gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
switch (gso_type) {
case SKB_GSO_TCPV4:
/* TCP v4, zero out the IP length and IPv4 checksum fields
* as required by the offloading doc
*/
ip_hdr(skb)->tot_len = 0;
ip_hdr(skb)->check = 0;
is_ipv4 = true;
break;
case SKB_GSO_TCPV6:
/* TCP v6, zero out the payload length */
ipv6_hdr(skb)->payload_len = 0;
is_ipv4 = false;
break;
default:
/* other than TCPv4 or TCPv6 types are not supported for TSO.
* It is also illegal for both to be set simultaneously
*/
return -EINVAL;
}
if (skb->ip_summed != CHECKSUM_PARTIAL)
return -EINVAL;
	/* tcp header length and skb network header length are fixed for all
	 * of the packet's descriptors - read them once here
	 */
tcp_hdr_len = tcp_hdrlen(skb);
skb_net_hdr_len = skb_network_header_len(skb);
_hdr_desc = &vring->va[i].tx.legacy;
pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, pa))) {
wil_err(wil, "TSO: Skb head DMA map error\n");
goto err_exit;
}
wil->txrx_ops.tx_desc_map((union wil_tx_desc *)hdr_desc, pa,
hdrlen, vring_index);
wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
tcp_hdr_len, skb_net_hdr_len);
wil_tx_last_desc(hdr_desc);
vring->ctx[i].mapped_as = wil_mapped_as_single;
hdr_ctx = &vring->ctx[i];
descs_used++;
headlen = skb_headlen(skb) - hdrlen;
for (f = headlen ? -1 : 0; f < nr_frags; f++) {
if (headlen) {
len = headlen;
wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
len);
} else {
frag = &skb_shinfo(skb)->frags[f];
len = skb_frag_size(frag);
wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
}
while (len) {
wil_dbg_txrx(wil,
"TSO: len %d, rem_data %d, descs_used %d\n",
len, rem_data, descs_used);
if (descs_used == avail) {
wil_err_ratelimited(wil, "TSO: ring overflow\n");
rc = -ENOMEM;
goto mem_error;
}
lenmss = min_t(int, rem_data, len);
i = (swhead + descs_used) % vring->size;
wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);
if (!headlen) {
pa = skb_frag_dma_map(dev, frag,
skb_frag_size(frag) - len,
lenmss, DMA_TO_DEVICE);
vring->ctx[i].mapped_as = wil_mapped_as_page;
} else {
pa = dma_map_single(dev,
skb->data +
skb_headlen(skb) - headlen,
lenmss,
DMA_TO_DEVICE);
vring->ctx[i].mapped_as = wil_mapped_as_single;
headlen -= lenmss;
}
if (unlikely(dma_mapping_error(dev, pa))) {
wil_err(wil, "TSO: DMA map page error\n");
goto mem_error;
}
_desc = &vring->va[i].tx.legacy;
if (!_first_desc) {
_first_desc = _desc;
first_ctx = &vring->ctx[i];
d = first_desc;
} else {
d = &desc_mem;
}
wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
pa, lenmss, vring_index);
wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
is_ipv4, tcp_hdr_len,
skb_net_hdr_len);
/* use tso_type_first only once */
desc_tso_type = wil_tso_type_mid;
descs_used++; /* desc used so far */
sg_desc_cnt++; /* desc used for this segment */
len -= lenmss;
rem_data -= lenmss;
wil_dbg_txrx(wil,
"TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
len, rem_data, descs_used, sg_desc_cnt);
			/* Close the segment if we reached the mss size or the last frag */
if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
if (hdr_compensation_need) {
					/* the first segment includes the
					 * hdr desc for release
					 */
hdr_ctx->nr_frags = sg_desc_cnt;
wil_tx_desc_set_nr_frags(first_desc,
sg_desc_cnt +
1);
hdr_compensation_need = false;
} else {
wil_tx_desc_set_nr_frags(first_desc,
sg_desc_cnt);
}
first_ctx->nr_frags = sg_desc_cnt - 1;
wil_tx_last_desc(d);
/* first descriptor may also be the last
* for this mss - make sure not to copy
* it twice
*/
if (first_desc != d)
*_first_desc = *first_desc;
				/* the last descriptor will be copied at the
				 * end of this TSO processing
				 */
if (f < nr_frags - 1 || len > 0)
*_desc = *d;
rem_data = mss;
_first_desc = NULL;
sg_desc_cnt = 0;
} else if (first_desc != d) /* update mid descriptor */
*_desc = *d;
}
}
if (!_desc)
goto mem_error;
	/* the first descriptor may also be the last.
	 * In this case the d pointer is invalid
	 */
if (_first_desc == _desc)
d = first_desc;
/* Last data descriptor */
wil_set_tx_desc_last_tso(d);
*_desc = *d;
/* Fill the total number of descriptors in first desc (hdr)*/
wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
*_hdr_desc = *hdr_desc;
/* hold reference to skb
* to prevent skb release before accounting
* in case of immediate "tx done"
*/
vring->ctx[i].skb = skb_get(skb);
/* performance monitoring */
used = wil_ring_used_tx(vring);
if (wil_val_in_range(wil->ring_idle_trsh,
used, used + descs_used)) {
txdata->idle += get_cycles() - txdata->last_idle;
wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
vring_index, used, used + descs_used);
}
/* Make sure to advance the head only after descriptor update is done.
* This will prevent a race condition where the completion thread
* will see the DU bit set from previous run and will handle the
* skb before it was completed.
*/
wmb();
/* advance swhead */
wil_ring_advance_head(vring, descs_used);
wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
/* make sure all writes to descriptors (shared memory) are done before
* committing them to HW
*/
wmb();
if (wil->tx_latency)
*(ktime_t *)&skb->cb = ktime_get();
else
memset(skb->cb, 0, sizeof(ktime_t));
wil_w(wil, vring->hwtail, vring->swhead);
return 0;
mem_error:
while (descs_used > 0) {
struct wil_ctx *ctx;
i = (swhead + descs_used - 1) % vring->size;
d = (struct vring_tx_desc *)&vring->va[i].tx.legacy;
_desc = &vring->va[i].tx.legacy;
*d = *_desc;
_desc->dma.status = TX_DMA_STATUS_DU;
ctx = &vring->ctx[i];
wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
memset(ctx, 0, sizeof(*ctx));
descs_used--;
}
err_exit:
return rc;
}
static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
struct wil_ring *ring, struct sk_buff *skb)
{
struct device *dev = wil_to_dev(wil);
	struct vring_tx_desc dd, *d = &dd;
volatile struct vring_tx_desc *_d;
u32 swhead = ring->swhead;
int avail = wil_ring_avail_tx(ring);
int nr_frags = skb_shinfo(skb)->nr_frags;
uint f = 0;
int ring_index = ring - wil->ring_tx;
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
uint i = swhead;
dma_addr_t pa;
int used;
bool mcast = (ring_index == vif->bcast_ring);
uint len = skb_headlen(skb);
wil_dbg_txrx(wil, "tx_ring: %d bytes to ring %d, nr_frags %d\n",
skb->len, ring_index, nr_frags);
if (unlikely(!txdata->enabled))
return -EINVAL;
if (unlikely(avail < 1 + nr_frags)) {
wil_err_ratelimited(wil,
"Tx ring[%2d] full. No space for %d fragments\n",
ring_index, 1 + nr_frags);
return -ENOMEM;
}
_d = &ring->va[i].tx.legacy;
pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", ring_index,
skb_headlen(skb), skb->data, &pa);
wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb_headlen(skb), false);
if (unlikely(dma_mapping_error(dev, pa)))
return -EINVAL;
ring->ctx[i].mapped_as = wil_mapped_as_single;
/* 1-st segment */
wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa, len,
ring_index);
if (unlikely(mcast)) {
d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
}
/* Process TCP/UDP checksum offloading */
if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
ring_index);
goto dma_error;
}
ring->ctx[i].nr_frags = nr_frags;
wil_tx_desc_set_nr_frags(d, nr_frags + 1);
/* middle segments */
for (; f < nr_frags; f++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
int len = skb_frag_size(frag);
*_d = *d;
wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
i = (swhead + f + 1) % ring->size;
_d = &ring->va[i].tx.legacy;
pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, pa))) {
wil_err(wil, "Tx[%2d] failed to map fragment\n",
ring_index);
goto dma_error;
}
ring->ctx[i].mapped_as = wil_mapped_as_page;
wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
pa, len, ring_index);
/* no need to check return code -
* if it succeeded for 1-st descriptor,
* it will succeed here too
*/
wil_tx_desc_offload_setup(d, skb);
}
/* for the last seg only */
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
*_d = *d;
wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
/* hold reference to skb
* to prevent skb release before accounting
* in case of immediate "tx done"
*/
ring->ctx[i].skb = skb_get(skb);
/* performance monitoring */
used = wil_ring_used_tx(ring);
if (wil_val_in_range(wil->ring_idle_trsh,
used, used + nr_frags + 1)) {
txdata->idle += get_cycles() - txdata->last_idle;
wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
ring_index, used, used + nr_frags + 1);
}
/* Make sure to advance the head only after descriptor update is done.
* This will prevent a race condition where the completion thread
* will see the DU bit set from previous run and will handle the
* skb before it was completed.
*/
wmb();
/* advance swhead */
wil_ring_advance_head(ring, nr_frags + 1);
wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", ring_index, swhead,
ring->swhead);
trace_wil6210_tx(ring_index, swhead, skb->len, nr_frags);
/* make sure all writes to descriptors (shared memory) are done before
* committing them to HW
*/
wmb();
if (wil->tx_latency)
*(ktime_t *)&skb->cb = ktime_get();
else
memset(skb->cb, 0, sizeof(ktime_t));
wil_w(wil, ring->hwtail, ring->swhead);
return 0;
dma_error:
/* unmap what we have mapped */
nr_frags = f + 1; /* frags mapped + one for skb head */
for (f = 0; f < nr_frags; f++) {
struct wil_ctx *ctx;
i = (swhead + f) % ring->size;
ctx = &ring->ctx[i];
_d = &ring->va[i].tx.legacy;
*d = *_d;
_d->dma.status = TX_DMA_STATUS_DU;
wil->txrx_ops.tx_desc_unmap(dev,
(union wil_tx_desc *)d,
ctx);
memset(ctx, 0, sizeof(*ctx));
}
return -EINVAL;
}
static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
struct wil_ring *ring, struct sk_buff *skb)
{
int ring_index = ring - wil->ring_tx;
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
int rc;
spin_lock(&txdata->lock);
if (test_bit(wil_status_suspending, wil->status) ||
test_bit(wil_status_suspended, wil->status) ||
test_bit(wil_status_resuming, wil->status)) {
wil_dbg_txrx(wil,
"suspend/resume in progress. drop packet\n");
spin_unlock(&txdata->lock);
return -EINVAL;
}
rc = (skb_is_gso(skb) ? wil->txrx_ops.tx_ring_tso : __wil_tx_ring)
(wil, vif, ring, skb);
spin_unlock(&txdata->lock);
return rc;
}
/* Check status of tx vrings and stop/wake net queues if needed
* It will start/stop net queues of a specific VIF net_device.
*
* This function does one of two checks:
* In case check_stop is true, will check if net queues need to be stopped. If
* the conditions for stopping are met, netif_tx_stop_all_queues() is called.
 * In case check_stop is false, will check if net queues need to be woken. If
 * the conditions for waking are met, netif_tx_wake_all_queues() is called.
 * vring is the vring which is currently being modified by either adding
 * descriptors (tx) into it or removing descriptors (tx complete) from it. Can
 * be NULL when irrelevant (e.g. connect/disconnect events).
*
* The implementation is to stop net queues if modified vring has low
* descriptor availability. Wake if all vrings are not in low descriptor
* availability and modified vring has high descriptor availability.
*/
static inline void __wil_update_net_queues(struct wil6210_priv *wil,
struct wil6210_vif *vif,
struct wil_ring *ring,
bool check_stop)
{
int i;
int min_ring_id = wil_get_min_tx_ring_id(wil);
if (unlikely(!vif))
return;
if (ring)
wil_dbg_txrx(wil, "vring %d, mid %d, check_stop=%d, stopped=%d",
(int)(ring - wil->ring_tx), vif->mid, check_stop,
vif->net_queue_stopped);
else
wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d",
check_stop, vif->mid, vif->net_queue_stopped);
if (ring && drop_if_ring_full)
/* no need to stop/wake net queues */
return;
if (check_stop == vif->net_queue_stopped)
/* net queues already in desired state */
return;
if (check_stop) {
if (!ring || unlikely(wil_ring_avail_low(ring))) {
/* not enough room in the vring */
netif_tx_stop_all_queues(vif_to_ndev(vif));
vif->net_queue_stopped = true;
wil_dbg_txrx(wil, "netif_tx_stop called\n");
}
return;
}
/* Do not wake the queues in suspend flow */
if (test_bit(wil_status_suspending, wil->status) ||
test_bit(wil_status_suspended, wil->status))
return;
/* check wake */
for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
struct wil_ring *cur_ring = &wil->ring_tx[i];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
if (txdata->mid != vif->mid || !cur_ring->va ||
!txdata->enabled || cur_ring == ring)
continue;
if (wil_ring_avail_low(cur_ring)) {
wil_dbg_txrx(wil, "ring %d full, can't wake\n",
(int)(cur_ring - wil->ring_tx));
return;
}
}
if (!ring || wil_ring_avail_high(ring)) {
/* enough room in the ring */
wil_dbg_txrx(wil, "calling netif_tx_wake\n");
netif_tx_wake_all_queues(vif_to_ndev(vif));
vif->net_queue_stopped = false;
}
}
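/* A minimal standalone sketch (illustrative, not driver code) of the
 * stop/wake hysteresis implemented above: queues stop when available
 * descriptors fall below a low watermark and wake only once they climb
 * back above a higher one, so the queues do not flap around a single
 * threshold. All names and numbers here are invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 128
#define LOW_WM (RING_SIZE / 8)  /* stop threshold: 16 slots */
#define HIGH_WM (RING_SIZE / 4) /* wake threshold: 32 slots */

static bool queue_stopped;

static void update_queue(int avail)
{
	if (!queue_stopped && avail < LOW_WM) {
		queue_stopped = true;  /* would be netif_tx_stop_all_queues() */
		printf("stop at avail=%d\n", avail);
	} else if (queue_stopped && avail >= HIGH_WM) {
		queue_stopped = false; /* would be netif_tx_wake_all_queues() */
		printf("wake at avail=%d\n", avail);
	}
}

int main(void)
{
	const int avail[] = { 40, 20, 12, 9, 14, 20, 33, 40 };
	unsigned int i;

	for (i = 0; i < sizeof(avail) / sizeof(avail[0]); i++)
		update_queue(avail[i]);
	return 0;
}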
void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
struct wil_ring *ring, bool check_stop)
{
spin_lock(&wil->net_queue_lock);
__wil_update_net_queues(wil, vif, ring, check_stop);
spin_unlock(&wil->net_queue_lock);
}
void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
struct wil_ring *ring, bool check_stop)
{
spin_lock_bh(&wil->net_queue_lock);
__wil_update_net_queues(wil, vif, ring, check_stop);
spin_unlock_bh(&wil->net_queue_lock);
}
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct wil6210_vif *vif = ndev_to_vif(ndev);
struct wil6210_priv *wil = vif_to_wil(vif);
const u8 *da = wil_skb_get_da(skb);
bool bcast = is_multicast_ether_addr(da);
struct wil_ring *ring;
static bool pr_once_fw;
int rc;
wil_dbg_txrx(wil, "start_xmit\n");
if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
if (!pr_once_fw) {
wil_err(wil, "FW not ready\n");
pr_once_fw = true;
}
goto drop;
}
if (unlikely(!test_bit(wil_vif_fwconnected, vif->status))) {
wil_dbg_ratelimited(wil,
"VIF not connected, packet dropped\n");
goto drop;
}
if (unlikely(vif->wdev.iftype == NL80211_IFTYPE_MONITOR)) {
wil_err(wil, "Xmit in monitor mode not supported\n");
goto drop;
}
pr_once_fw = false;
/* find vring */
if (vif->wdev.iftype == NL80211_IFTYPE_STATION && !vif->pbss) {
/* in STA mode (ESS), all to same VRING (to AP) */
ring = wil_find_tx_ring_sta(wil, vif, skb);
} else if (bcast) {
if (vif->pbss || wil_check_multicast_to_unicast(wil, skb))
/* in PBSS there is no bcast VRING - duplicate the skb
 * in all stations' VRINGs
 */
ring = wil_find_tx_bcast_2(wil, vif, skb);
else if (vif->wdev.iftype == NL80211_IFTYPE_AP)
/* AP has a dedicated bcast VRING */
ring = wil_find_tx_bcast_1(wil, vif, skb);
else
/* unexpected combination, fall back to duplicating
 * the skb in all stations' VRINGs
 */
ring = wil_find_tx_bcast_2(wil, vif, skb);
} else {
/* unicast, find specific VRING by dest. address */
ring = wil_find_tx_ucast(wil, vif, skb);
}
if (unlikely(!ring)) {
wil_dbg_txrx(wil, "No Tx RING found for %pM\n", da);
goto drop;
}
/* set up vring entry */
rc = wil_tx_ring(wil, vif, ring, skb);
switch (rc) {
case 0:
/* shall we stop net queues? */
wil_update_net_queues_bh(wil, vif, ring, true);
/* statistics will be updated on the tx_complete */
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
case -ENOMEM:
if (drop_if_ring_full)
goto drop;
return NETDEV_TX_BUSY;
default:
break; /* goto drop; */
}
drop:
ndev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
return NET_XMIT_DROP;
}
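/* Note on the returns above: on success the ring holds its own
 * reference (skb_get() in __wil_tx_ring), so the local reference is
 * dropped and NETDEV_TX_OK is returned. NETDEV_TX_BUSY hands the
 * unconsumed skb back to the stack for requeueing, which is why the
 * -ENOMEM path must not free it; the drop path consumes the skb and
 * accounts it in tx_dropped.
 */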
void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
struct wil_sta_info *sta)
{
int skb_time_us;
int bin;
if (!wil->tx_latency)
return;
if (ktime_to_ms(*(ktime_t *)&skb->cb) == 0)
return;
skb_time_us = ktime_us_delta(ktime_get(), *(ktime_t *)&skb->cb);
bin = skb_time_us / wil->tx_latency_res;
bin = min_t(int, bin, WIL_NUM_LATENCY_BINS - 1);
wil_dbg_txrx(wil, "skb time %dus => bin %d\n", skb_time_us, bin);
sta->tx_latency_bins[bin]++;
sta->stats.tx_latency_total_us += skb_time_us;
if (skb_time_us < sta->stats.tx_latency_min_us)
sta->stats.tx_latency_min_us = skb_time_us;
if (skb_time_us > sta->stats.tx_latency_max_us)
sta->stats.tx_latency_max_us = skb_time_us;
}
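/* Standalone worked example (illustrative values) of the binning above:
 * with a bin resolution of 100 us and 24 bins, 250 us lands in bin 2
 * and anything >= 2300 us is clamped into the last bin. The constant
 * and helper names below are invented for this sketch.
 */
#include <stdio.h>

#define NUM_LATENCY_BINS 24

static int latency_bin(int skb_time_us, int res_us)
{
	int bin = skb_time_us / res_us; /* linear binning */

	return bin < NUM_LATENCY_BINS ? bin : NUM_LATENCY_BINS - 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       latency_bin(250, 100),   /* 2 */
	       latency_bin(2250, 100),  /* 22 */
	       latency_bin(9999, 100)); /* 23 (clamped) */
	return 0;
}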
/* Clean up transmitted skb's from the Tx VRING
*
* Return number of descriptors cleared
*
* Safe to call from IRQ
*/
int wil_tx_complete(struct wil6210_vif *vif, int ringid)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct net_device *ndev = vif_to_ndev(vif);
struct device *dev = wil_to_dev(wil);
struct wil_ring *vring = &wil->ring_tx[ringid];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
int done = 0;
int cid = wil->ring2cid_tid[ringid][0];
struct wil_net_stats *stats = NULL;
volatile struct vring_tx_desc *_d;
int used_before_complete;
int used_new;
if (unlikely(!vring->va)) {
wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
return 0;
}
if (unlikely(!txdata->enabled)) {
wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
return 0;
}
wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);
used_before_complete = wil_ring_used_tx(vring);
if (cid < wil->max_assoc_sta)
stats = &wil->sta[cid].stats;
while (!wil_ring_is_empty(vring)) {
int new_swtail;
struct wil_ctx *ctx = &vring->ctx[vring->swtail];
/* For a fragmented skb, HW will set the DU bit only for the
 * last fragment; look for it.
 * In TSO, the first DU will include the hdr desc.
 */
int lf = (vring->swtail + ctx->nr_frags) % vring->size;
/* TODO: check we are not past head */
_d = &vring->va[lf].tx.legacy;
if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
break;
new_swtail = (lf + 1) % vring->size;
while (vring->swtail != new_swtail) {
struct vring_tx_desc dd, *d = ⅆ
u16 dmalen;
struct sk_buff *skb;
ctx = &vring->ctx[vring->swtail];
skb = ctx->skb;
_d = &vring->va[vring->swtail].tx.legacy;
*d = *_d;
dmalen = le16_to_cpu(d->dma.length);
trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
d->dma.error);
wil_dbg_txrx(wil,
"TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
ringid, vring->swtail, dmalen,
d->dma.status, d->dma.error);
wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
wil->txrx_ops.tx_desc_unmap(dev,
(union wil_tx_desc *)d,
ctx);
if (skb) {
if (likely(d->dma.error == 0)) {
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
if (stats) {
stats->tx_packets++;
stats->tx_bytes += skb->len;
wil_tx_latency_calc(wil, skb,
&wil->sta[cid]);
}
} else {
ndev->stats.tx_errors++;
if (stats)
stats->tx_errors++;
}
if (skb->protocol == cpu_to_be16(ETH_P_PAE))
wil_tx_complete_handle_eapol(vif, skb);
wil_consume_skb(skb, d->dma.error == 0);
}
memset(ctx, 0, sizeof(*ctx));
/* Make sure the ctx is zeroed before updating the tail
* to prevent a case where wil_tx_ring will see
* this descriptor as used and handle it before ctx zero
* is completed.
*/
wmb();
/* There is no need to touch the HW descriptor:
 * - status bit TX_DMA_STATUS_DU is set by design,
 * so hardware will not try to process this descriptor,
 * - the rest of the descriptor will be initialized on Tx.
 */
vring->swtail = wil_ring_next_tail(vring);
done++;
}
}
/* performance monitoring */
used_new = wil_ring_used_tx(vring);
if (wil_val_in_range(wil->ring_idle_trsh,
used_new, used_before_complete)) {
wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
ringid, used_before_complete, used_new);
txdata->last_idle = get_cycles();
}
/* shall we wake net queues? */
if (done)
wil_update_net_queues(wil, vif, vring, false);
return done;
}
static inline int wil_tx_init(struct wil6210_priv *wil)
{
return 0;
}
static inline void wil_tx_fini(struct wil6210_priv *wil) {}
static void wil_get_reorder_params(struct wil6210_priv *wil,
struct sk_buff *skb, int *tid, int *cid,
int *mid, u16 *seq, int *mcast, int *retry)
{
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
*tid = wil_rxdesc_tid(d);
*cid = wil_skb_get_cid(skb);
*mid = wil_rxdesc_mid(d);
*seq = wil_rxdesc_seq(d);
*mcast = wil_rxdesc_mcast(d);
*retry = wil_rxdesc_retry(d);
}
void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil)
{
wil->txrx_ops.configure_interrupt_moderation =
wil_configure_interrupt_moderation;
/* TX ops */
wil->txrx_ops.tx_desc_map = wil_tx_desc_map;
wil->txrx_ops.tx_desc_unmap = wil_txdesc_unmap;
wil->txrx_ops.tx_ring_tso = __wil_tx_vring_tso;
wil->txrx_ops.ring_init_tx = wil_vring_init_tx;
wil->txrx_ops.ring_fini_tx = wil_vring_free;
wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast;
wil->txrx_ops.tx_init = wil_tx_init;
wil->txrx_ops.tx_fini = wil_tx_fini;
wil->txrx_ops.tx_ring_modify = wil_tx_vring_modify;
/* RX ops */
wil->txrx_ops.rx_init = wil_rx_init;
wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp;
wil->txrx_ops.get_reorder_params = wil_get_reorder_params;
wil->txrx_ops.get_netif_rx_params =
wil_get_netif_rx_params;
wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check;
wil->txrx_ops.rx_error_check = wil_rx_error_check;
wil->txrx_ops.is_rx_idle = wil_is_rx_idle;
wil->txrx_ops.rx_fini = wil_rx_fini;
}
|
linux-master
|
drivers/net/wireless/ath/wil6210/txrx.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include "wil6210.h"
#include "txrx.h"
#define SEQ_MODULO 0x1000
#define SEQ_MASK 0xfff
static inline int seq_less(u16 sq1, u16 sq2)
{
return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}
static inline u16 seq_inc(u16 sq)
{
return (sq + 1) & SEQ_MASK;
}
static inline u16 seq_sub(u16 sq1, u16 sq2)
{
return (sq1 - sq2) & SEQ_MASK;
}
static inline int reorder_index(struct wil_tid_ampdu_rx *r, u16 seq)
{
return seq_sub(seq, r->ssn) % r->buf_size;
}
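/* Standalone sketch of the 12-bit modular sequence arithmetic above:
 * seq_less() interprets the 0..0xfff ring as two half-spaces, so the
 * comparison stays correct across the 0xfff -> 0x000 wraparound.
 */
#include <stdio.h>

#define DEMO_SEQ_MODULO 0x1000
#define DEMO_SEQ_MASK 0xfff

static int demo_seq_less(unsigned short sq1, unsigned short sq2)
{
	return ((sq1 - sq2) & DEMO_SEQ_MASK) > (DEMO_SEQ_MODULO >> 1);
}

int main(void)
{
	/* 0xffe precedes 0x001 once the counter wraps */
	printf("%d\n", demo_seq_less(0xffe, 0x001)); /* 1 */
	printf("%d\n", demo_seq_less(0x001, 0xffe)); /* 0 */
	return 0;
}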
static void wil_release_reorder_frame(struct net_device *ndev,
struct wil_tid_ampdu_rx *r,
int index)
{
struct sk_buff *skb = r->reorder_buf[index];
if (!skb)
goto no_frame;
/* release the frame from the reorder ring buffer */
r->stored_mpdu_num--;
r->reorder_buf[index] = NULL;
wil_netif_rx_any(skb, ndev);
no_frame:
r->head_seq_num = seq_inc(r->head_seq_num);
}
static void wil_release_reorder_frames(struct net_device *ndev,
struct wil_tid_ampdu_rx *r,
u16 hseq)
{
int index;
/* note: this function is never called with
 * hseq preceding r->head_seq_num, i.e. it is always true that
 * !seq_less(hseq, r->head_seq_num),
 * and thus on loop exit we have
 * r->head_seq_num == hseq
 */
while (seq_less(r->head_seq_num, hseq) && r->stored_mpdu_num) {
index = reorder_index(r, r->head_seq_num);
wil_release_reorder_frame(ndev, r, index);
}
r->head_seq_num = hseq;
}
static void wil_reorder_release(struct net_device *ndev,
struct wil_tid_ampdu_rx *r)
{
int index = reorder_index(r, r->head_seq_num);
while (r->reorder_buf[index]) {
wil_release_reorder_frame(ndev, r, index);
index = reorder_index(r, r->head_seq_num);
}
}
/* called in NAPI context */
void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
struct wil6210_vif *vif;
struct net_device *ndev;
int tid, cid, mid, mcast, retry;
u16 seq;
struct wil_sta_info *sta;
struct wil_tid_ampdu_rx *r;
u16 hseq;
int index;
wil->txrx_ops.get_reorder_params(wil, skb, &tid, &cid, &mid, &seq,
&mcast, &retry);
sta = &wil->sta[cid];
wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
mid, cid, tid, seq, mcast);
vif = wil->vifs[mid];
if (unlikely(!vif)) {
wil_dbg_txrx(wil, "invalid VIF, mid %d\n", mid);
dev_kfree_skb(skb);
return;
}
ndev = vif_to_ndev(vif);
spin_lock(&sta->tid_rx_lock);
r = sta->tid_rx[tid];
if (!r) {
wil_netif_rx_any(skb, ndev);
goto out;
}
if (unlikely(mcast)) {
if (retry && seq == r->mcast_last_seq) {
r->drop_dup_mcast++;
wil_dbg_txrx(wil, "Rx drop: dup mcast seq 0x%03x\n",
seq);
dev_kfree_skb(skb);
goto out;
}
r->mcast_last_seq = seq;
wil_netif_rx_any(skb, ndev);
goto out;
}
r->total++;
hseq = r->head_seq_num;
/* Due to the race between the WMI event reporting BACK establishment
 * and data Rx, a few packets may be passed up before the reorder
 * buffer gets allocated. Catch up by pretending the SSN is what we
 * see in the first Rx packet.
 *
 * Another scenario: Rx got delayed and we received a packet from
 * before the BACK. Pass it to the stack and wait.
 */
if (r->first_time) {
r->first_time = false;
if (seq != r->head_seq_num) {
if (seq_less(seq, r->head_seq_num)) {
wil_err(wil,
"Error: frame with early sequence 0x%03x, should be 0x%03x. Waiting...\n",
seq, r->head_seq_num);
r->first_time = true;
wil_netif_rx_any(skb, ndev);
goto out;
}
wil_err(wil,
"Error: 1-st frame with wrong sequence 0x%03x, should be 0x%03x. Fixing...\n",
seq, r->head_seq_num);
r->head_seq_num = seq;
r->ssn = seq;
}
}
/* frame with out of date sequence number */
if (seq_less(seq, r->head_seq_num)) {
r->ssn_last_drop = seq;
r->drop_old++;
wil_dbg_txrx(wil, "Rx drop: old seq 0x%03x head 0x%03x\n",
seq, r->head_seq_num);
dev_kfree_skb(skb);
goto out;
}
/* If the frame sequence number exceeds our buffering window
 * size, release some previous frames to make room for this one.
 */
if (!seq_less(seq, r->head_seq_num + r->buf_size)) {
hseq = seq_inc(seq_sub(seq, r->buf_size));
/* release stored frames up to new head to stack */
wil_release_reorder_frames(ndev, r, hseq);
}
/* Now the new frame is always in the range of the reordering buffer */
index = reorder_index(r, seq);
/* check if we already stored this frame */
if (r->reorder_buf[index]) {
r->drop_dup++;
wil_dbg_txrx(wil, "Rx drop: dup seq 0x%03x\n", seq);
dev_kfree_skb(skb);
goto out;
}
/*
* If the current MPDU is in the right order and nothing else
* is stored we can process it directly, no need to buffer it.
* If it is first but there's something stored, we may be able
* to release frames after this one.
*/
if (seq == r->head_seq_num && r->stored_mpdu_num == 0) {
r->head_seq_num = seq_inc(r->head_seq_num);
wil_netif_rx_any(skb, ndev);
goto out;
}
/* put the frame in the reordering buffer */
r->reorder_buf[index] = skb;
r->stored_mpdu_num++;
wil_reorder_release(ndev, r);
out:
spin_unlock(&sta->tid_rx_lock);
}
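/* Standalone sketch of the window-advance arithmetic used above when a
 * frame lands beyond the reorder window: with buf_size = 64 and head
 * 0x010, a frame with seq 0x060 moves the head to
 * seq_inc(seq_sub(0x060, 64)) = 0x021, releasing stored frames up to it.
 */
#include <stdio.h>

#define DEMO_SEQ_MASK 0xfff

static unsigned short demo_seq_sub(unsigned short a, unsigned short b)
{
	return (a - b) & DEMO_SEQ_MASK;
}

static unsigned short demo_seq_inc(unsigned short s)
{
	return (s + 1) & DEMO_SEQ_MASK;
}

int main(void)
{
	unsigned short seq = 0x060, buf_size = 64;

	printf("new head 0x%03x\n", demo_seq_inc(demo_seq_sub(seq, buf_size)));
	return 0;
}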
/* process BAR frame, called in NAPI context */
void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif,
u8 cid, u8 tid, u16 seq)
{
struct wil_sta_info *sta = &wil->sta[cid];
struct net_device *ndev = vif_to_ndev(vif);
struct wil_tid_ampdu_rx *r;
spin_lock(&sta->tid_rx_lock);
r = sta->tid_rx[tid];
if (!r) {
wil_err(wil, "BAR for non-existing CID %d TID %d\n", cid, tid);
goto out;
}
if (seq_less(seq, r->head_seq_num)) {
wil_err(wil, "BAR Seq 0x%03x preceding head 0x%03x\n",
seq, r->head_seq_num);
goto out;
}
wil_dbg_txrx(wil, "BAR: CID %d MID %d TID %d Seq 0x%03x head 0x%03x\n",
cid, vif->mid, tid, seq, r->head_seq_num);
wil_release_reorder_frames(ndev, r, seq);
out:
spin_unlock(&sta->tid_rx_lock);
}
struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
int size, u16 ssn)
{
struct wil_tid_ampdu_rx *r = kzalloc(sizeof(*r), GFP_KERNEL);
if (!r)
return NULL;
r->reorder_buf =
kcalloc(size, sizeof(struct sk_buff *), GFP_KERNEL);
if (!r->reorder_buf) {
kfree(r);
return NULL;
}
r->ssn = ssn;
r->head_seq_num = ssn;
r->buf_size = size;
r->stored_mpdu_num = 0;
r->first_time = true;
r->mcast_last_seq = U16_MAX;
return r;
}
void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
struct wil_tid_ampdu_rx *r)
{
int i;
if (!r)
return;
/* Do not pass remaining frames to the network stack - it may not
 * be expecting any more Rx. Rx from here may lead to a kernel
 * oops since some per-socket accounting info was already
 * released.
 */
for (i = 0; i < r->buf_size; i++)
kfree_skb(r->reorder_buf[i]);
kfree(r->reorder_buf);
kfree(r);
}
/* ADDBA processing */
static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
{
u16 max_agg_size = min_t(u16, wil->max_agg_wsize, wil->max_ampdu_size /
(mtu_max + WIL_MAX_MPDU_OVERHEAD));
if (!req_agg_wsize)
return max_agg_size;
return min(max_agg_size, req_agg_wsize);
}
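/* Worked example (illustrative numbers): with max_agg_wsize = 64,
 * max_ampdu_size = 65535 and mtu_max + WIL_MAX_MPDU_OVERHEAD = 2048,
 * max_agg_size = min(64, 65535 / 2048) = min(64, 31) = 31; a request
 * of 0 then gets 31, while a request of 16 gets min(31, 16) = 16.
 */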
/* Block Ack - Rx side (recipient) */
int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid,
u8 dialog_token, __le16 ba_param_set,
__le16 ba_timeout, __le16 ba_seq_ctrl)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
u16 param_set = le16_to_cpu(ba_param_set);
u16 agg_timeout = le16_to_cpu(ba_timeout);
u16 seq_ctrl = le16_to_cpu(ba_seq_ctrl);
struct wil_sta_info *sta;
u16 agg_wsize;
/* bit 0: A-MSDU supported
* bit 1: policy (should be 0 for us)
* bits 2..5: TID
* bits 6..15: buffer size
*/
u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15);
bool agg_amsdu = wil->use_enhanced_dma_hw &&
wil->use_rx_hw_reordering &&
test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) &&
wil->amsdu_en && (param_set & BIT(0));
int ba_policy = param_set & BIT(1);
u16 ssn = seq_ctrl >> 4;
struct wil_tid_ampdu_rx *r;
int rc = 0;
might_sleep();
/* sanity checks */
if (cid >= wil->max_assoc_sta) {
wil_err(wil, "BACK: invalid CID %d\n", cid);
rc = -EINVAL;
goto out;
}
sta = &wil->sta[cid];
if (sta->status != wil_sta_connected) {
wil_err(wil, "BACK: CID %d not connected\n", cid);
rc = -EINVAL;
goto out;
}
wil_dbg_wmi(wil,
"ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n",
cid, sta->addr, tid, req_agg_wsize, agg_timeout,
agg_amsdu ? "+" : "-", !!ba_policy, dialog_token, ssn);
/* apply policies */
if (req_agg_wsize == 0) {
wil_dbg_misc(wil, "Suggest BACK wsize %d\n",
wil->max_agg_wsize);
agg_wsize = wil->max_agg_wsize;
} else {
agg_wsize = min_t(u16, wil->max_agg_wsize, req_agg_wsize);
}
rc = wil->txrx_ops.wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token,
WLAN_STATUS_SUCCESS, agg_amsdu,
agg_wsize, agg_timeout);
if (rc) {
wil_err(wil, "do not apply ba, rc(%d)\n", rc);
goto out;
}
/* apply */
if (!wil->use_rx_hw_reordering) {
r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
spin_lock_bh(&sta->tid_rx_lock);
wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
sta->tid_rx[tid] = r;
spin_unlock_bh(&sta->tid_rx_lock);
}
out:
return rc;
}
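/* Worked example (illustrative value): for ba_param_set = 0x1002 the
 * bit layout documented above gives A-MSDU supported = bit 0 = 0,
 * policy = bit 1 = 1, TID = bits 2..5 = 0, and
 * req_agg_wsize = bits 6..15 = 64.
 */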
/* BACK - Tx side (originator) */
int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
{
u8 agg_wsize = wil_agg_size(wil, wsize);
u16 agg_timeout = 0;
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
int rc = 0;
if (txdata->addba_in_progress) {
wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n",
ringid);
goto out;
}
if (txdata->agg_wsize) {
wil_dbg_misc(wil,
"ADDBA for vring[%d] already done for wsize %d\n",
ringid, txdata->agg_wsize);
goto out;
}
txdata->addba_in_progress = true;
rc = wmi_addba(wil, txdata->mid, ringid, agg_wsize, agg_timeout);
if (rc) {
wil_err(wil, "wmi_addba failed, rc (%d)", rc);
txdata->addba_in_progress = false;
}
out:
return rc;
}
|
linux-master
|
drivers/net/wireless/ath/wil6210/rx_reorder.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include "wil6210.h"
#include "txrx.h"
bool wil_has_other_active_ifaces(struct wil6210_priv *wil,
struct net_device *ndev, bool up, bool ok)
{
int i;
struct wil6210_vif *vif;
struct net_device *ndev_i;
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (vif) {
ndev_i = vif_to_ndev(vif);
if (ndev_i != ndev)
if ((up && (ndev_i->flags & IFF_UP)) ||
(ok && netif_carrier_ok(ndev_i)))
return true;
}
}
return false;
}
bool wil_has_active_ifaces(struct wil6210_priv *wil, bool up, bool ok)
{
/* use NULL ndev argument to check all interfaces */
return wil_has_other_active_ifaces(wil, NULL, up, ok);
}
static int wil_open(struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
int rc = 0;
wil_dbg_misc(wil, "open\n");
if (debug_fw ||
test_bit(WMI_FW_CAPABILITY_WMI_ONLY, wil->fw_capabilities)) {
wil_err(wil, "while in debug_fw or wmi_only mode\n");
return -EINVAL;
}
if (!wil_has_other_active_ifaces(wil, ndev, true, false)) {
wil_dbg_misc(wil, "open, first iface\n");
rc = wil_pm_runtime_get(wil);
if (rc < 0)
return rc;
rc = wil_up(wil);
if (rc)
wil_pm_runtime_put(wil);
}
return rc;
}
static int wil_stop(struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
int rc = 0;
wil_dbg_misc(wil, "stop\n");
if (!wil_has_other_active_ifaces(wil, ndev, true, false)) {
wil_dbg_misc(wil, "stop, last iface\n");
rc = wil_down(wil);
if (!rc)
wil_pm_runtime_put(wil);
}
return rc;
}
static const struct net_device_ops wil_netdev_ops = {
.ndo_open = wil_open,
.ndo_stop = wil_stop,
.ndo_start_xmit = wil_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
{
struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
napi_rx);
int quota = budget;
int done;
wil_rx_handle(wil, "a);
done = budget - quota;
if (done < budget) {
napi_complete_done(napi, done);
wil6210_unmask_irq_rx(wil);
wil_dbg_txrx(wil, "NAPI RX complete\n");
}
wil_dbg_txrx(wil, "NAPI RX poll(%d) done %d\n", budget, done);
return done;
}
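/* Note the NAPI contract followed above and below: a poll handler
 * returns how much work was done; returning less than the budget means
 * the ring is drained, so the handler completes NAPI and re-enables the
 * corresponding interrupt. Returning the full budget keeps the softirq
 * polling with the interrupt still masked.
 */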
static int wil6210_netdev_poll_rx_edma(struct napi_struct *napi, int budget)
{
struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
napi_rx);
int quota = budget;
int done;
wil_rx_handle_edma(wil, "a);
done = budget - quota;
if (done < budget) {
napi_complete_done(napi, done);
wil6210_unmask_irq_rx_edma(wil);
wil_dbg_txrx(wil, "NAPI RX complete\n");
}
wil_dbg_txrx(wil, "NAPI RX poll(%d) done %d\n", budget, done);
return done;
}
static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
{
struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
napi_tx);
int tx_done = 0;
uint i;
/* always process ALL Tx completions, regardless of budget - it is fast */
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
struct wil_ring *ring = &wil->ring_tx[i];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
struct wil6210_vif *vif;
if (!ring->va || !txdata->enabled ||
txdata->mid >= GET_MAX_VIFS(wil))
continue;
vif = wil->vifs[txdata->mid];
if (unlikely(!vif)) {
wil_dbg_txrx(wil, "Invalid MID %d\n", txdata->mid);
continue;
}
tx_done += wil_tx_complete(vif, i);
}
if (tx_done < budget) {
napi_complete(napi);
wil6210_unmask_irq_tx(wil);
wil_dbg_txrx(wil, "NAPI TX complete\n");
}
wil_dbg_txrx(wil, "NAPI TX poll(%d) done %d\n", budget, tx_done);
return min(tx_done, budget);
}
static int wil6210_netdev_poll_tx_edma(struct napi_struct *napi, int budget)
{
struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
napi_tx);
int tx_done;
/* There is only one status TX ring */
struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];
if (!sring->va)
return 0;
tx_done = wil_tx_sring_handler(wil, sring);
if (tx_done < budget) {
napi_complete(napi);
wil6210_unmask_irq_tx_edma(wil);
wil_dbg_txrx(wil, "NAPI TX complete\n");
}
wil_dbg_txrx(wil, "NAPI TX poll(%d) done %d\n", budget, tx_done);
return min(tx_done, budget);
}
static void wil_dev_setup(struct net_device *dev)
{
ether_setup(dev);
dev->max_mtu = mtu_max;
dev->tx_queue_len = WIL_TX_Q_LEN_DEFAULT;
}
static void wil_vif_deinit(struct wil6210_vif *vif)
{
del_timer_sync(&vif->scan_timer);
del_timer_sync(&vif->p2p.discovery_timer);
cancel_work_sync(&vif->disconnect_worker);
cancel_work_sync(&vif->p2p.discovery_expired_work);
cancel_work_sync(&vif->p2p.delayed_listen_work);
wil_probe_client_flush(vif);
cancel_work_sync(&vif->probe_client_worker);
cancel_work_sync(&vif->enable_tx_key_worker);
}
void wil_vif_free(struct wil6210_vif *vif)
{
struct net_device *ndev = vif_to_ndev(vif);
wil_vif_deinit(vif);
free_netdev(ndev);
}
static void wil_ndev_destructor(struct net_device *ndev)
{
struct wil6210_vif *vif = ndev_to_vif(ndev);
wil_vif_deinit(vif);
}
static void wil_connect_timer_fn(struct timer_list *t)
{
struct wil6210_vif *vif = from_timer(vif, t, connect_timer);
struct wil6210_priv *wil = vif_to_wil(vif);
bool q;
wil_err(wil, "Connect timeout detected, disconnect station\n");
/* reschedule to thread context - disconnect can't
 * run from atomic context.
 * queue on wmi_wq to prevent a race with the connect event.
 */
q = queue_work(wil->wmi_wq, &vif->disconnect_worker);
wil_dbg_wmi(wil, "queue_work of disconnect_worker -> %d\n", q);
}
static void wil_scan_timer_fn(struct timer_list *t)
{
struct wil6210_vif *vif = from_timer(vif, t, scan_timer);
struct wil6210_priv *wil = vif_to_wil(vif);
clear_bit(wil_status_fwready, wil->status);
wil_err(wil, "Scan timeout detected, start fw error recovery\n");
wil_fw_error_recovery(wil);
}
static void wil_p2p_discovery_timer_fn(struct timer_list *t)
{
struct wil6210_vif *vif = from_timer(vif, t, p2p.discovery_timer);
struct wil6210_priv *wil = vif_to_wil(vif);
wil_dbg_misc(wil, "p2p_discovery_timer_fn\n");
schedule_work(&vif->p2p.discovery_expired_work);
}
static void wil_vif_init(struct wil6210_vif *vif)
{
vif->bcast_ring = -1;
mutex_init(&vif->probe_client_mutex);
timer_setup(&vif->connect_timer, wil_connect_timer_fn, 0);
timer_setup(&vif->scan_timer, wil_scan_timer_fn, 0);
timer_setup(&vif->p2p.discovery_timer, wil_p2p_discovery_timer_fn, 0);
INIT_WORK(&vif->probe_client_worker, wil_probe_client_worker);
INIT_WORK(&vif->disconnect_worker, wil_disconnect_worker);
INIT_WORK(&vif->p2p.discovery_expired_work, wil_p2p_listen_expired);
INIT_WORK(&vif->p2p.delayed_listen_work, wil_p2p_delayed_listen_work);
INIT_WORK(&vif->enable_tx_key_worker, wil_enable_tx_key_worker);
INIT_LIST_HEAD(&vif->probe_client_pending);
vif->net_queue_stopped = 1;
}
static u8 wil_vif_find_free_mid(struct wil6210_priv *wil)
{
u8 i;
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
if (!wil->vifs[i])
return i;
}
return U8_MAX;
}
struct wil6210_vif *
wil_vif_alloc(struct wil6210_priv *wil, const char *name,
unsigned char name_assign_type, enum nl80211_iftype iftype)
{
struct net_device *ndev;
struct wireless_dev *wdev;
struct wil6210_vif *vif;
u8 mid;
mid = wil_vif_find_free_mid(wil);
if (mid == U8_MAX) {
wil_err(wil, "no available virtual interface\n");
return ERR_PTR(-EINVAL);
}
ndev = alloc_netdev(sizeof(*vif), name, name_assign_type,
wil_dev_setup);
if (!ndev) {
dev_err(wil_to_dev(wil), "alloc_netdev failed\n");
return ERR_PTR(-ENOMEM);
}
if (mid == 0) {
wil->main_ndev = ndev;
} else {
ndev->priv_destructor = wil_ndev_destructor;
ndev->needs_free_netdev = true;
}
vif = ndev_to_vif(ndev);
vif->ndev = ndev;
vif->wil = wil;
vif->mid = mid;
wil_vif_init(vif);
wdev = &vif->wdev;
wdev->wiphy = wil->wiphy;
wdev->iftype = iftype;
ndev->netdev_ops = &wil_netdev_ops;
wil_set_ethtoolops(ndev);
ndev->ieee80211_ptr = wdev;
ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
NETIF_F_SG | NETIF_F_GRO |
NETIF_F_TSO | NETIF_F_TSO6;
ndev->features |= ndev->hw_features;
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
wdev->netdev = ndev;
return vif;
}
void *wil_if_alloc(struct device *dev)
{
struct wil6210_priv *wil;
struct wil6210_vif *vif;
int rc = 0;
wil = wil_cfg80211_init(dev);
if (IS_ERR(wil)) {
dev_err(dev, "wil_cfg80211_init failed\n");
return wil;
}
rc = wil_priv_init(wil);
if (rc) {
dev_err(dev, "wil_priv_init failed\n");
goto out_cfg;
}
wil_dbg_misc(wil, "if_alloc\n");
vif = wil_vif_alloc(wil, "wlan%d", NET_NAME_UNKNOWN,
NL80211_IFTYPE_STATION);
if (IS_ERR(vif)) {
dev_err(dev, "wil_vif_alloc failed\n");
rc = -ENOMEM;
goto out_priv;
}
wil->radio_wdev = vif_to_wdev(vif);
return wil;
out_priv:
wil_priv_deinit(wil);
out_cfg:
wil_cfg80211_deinit(wil);
return ERR_PTR(rc);
}
void wil_if_free(struct wil6210_priv *wil)
{
struct net_device *ndev = wil->main_ndev;
wil_dbg_misc(wil, "if_free\n");
if (!ndev)
return;
wil_priv_deinit(wil);
wil->main_ndev = NULL;
wil_ndev_destructor(ndev);
free_netdev(ndev);
wil_cfg80211_deinit(wil);
}
int wil_vif_add(struct wil6210_priv *wil, struct wil6210_vif *vif)
{
struct net_device *ndev = vif_to_ndev(vif);
struct wireless_dev *wdev = vif_to_wdev(vif);
bool any_active = wil_has_active_ifaces(wil, true, false);
int rc;
ASSERT_RTNL();
if (wil->vifs[vif->mid]) {
dev_err(&ndev->dev, "VIF with mid %d already in use\n",
vif->mid);
return -EEXIST;
}
if (any_active && vif->mid != 0) {
rc = wmi_port_allocate(wil, vif->mid, ndev->dev_addr,
wdev->iftype);
if (rc)
return rc;
}
rc = cfg80211_register_netdevice(ndev);
if (rc < 0) {
dev_err(&ndev->dev, "Failed to register netdev: %d\n", rc);
if (any_active && vif->mid != 0)
wmi_port_delete(wil, vif->mid);
return rc;
}
wil->vifs[vif->mid] = vif;
return 0;
}
int wil_if_add(struct wil6210_priv *wil)
{
struct wiphy *wiphy = wil->wiphy;
struct net_device *ndev = wil->main_ndev;
struct wil6210_vif *vif = ndev_to_vif(ndev);
int rc;
wil_dbg_misc(wil, "entered");
strscpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
rc = wiphy_register(wiphy);
if (rc < 0) {
wil_err(wil, "failed to register wiphy, err %d\n", rc);
return rc;
}
init_dummy_netdev(&wil->napi_ndev);
if (wil->use_enhanced_dma_hw) {
netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
wil6210_netdev_poll_rx_edma);
netif_napi_add_tx(&wil->napi_ndev,
&wil->napi_tx, wil6210_netdev_poll_tx_edma);
} else {
netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
wil6210_netdev_poll_rx);
netif_napi_add_tx(&wil->napi_ndev,
&wil->napi_tx, wil6210_netdev_poll_tx);
}
wil_update_net_queues_bh(wil, vif, NULL, true);
rtnl_lock();
wiphy_lock(wiphy);
rc = wil_vif_add(wil, vif);
wiphy_unlock(wiphy);
rtnl_unlock();
if (rc < 0)
goto out_wiphy;
return 0;
out_wiphy:
wiphy_unregister(wiphy);
return rc;
}
void wil_vif_remove(struct wil6210_priv *wil, u8 mid)
{
struct wil6210_vif *vif;
struct net_device *ndev;
bool any_active = wil_has_active_ifaces(wil, true, false);
ASSERT_RTNL();
if (mid >= GET_MAX_VIFS(wil)) {
wil_err(wil, "invalid MID: %d\n", mid);
return;
}
vif = wil->vifs[mid];
if (!vif) {
wil_err(wil, "MID %d not registered\n", mid);
return;
}
mutex_lock(&wil->mutex);
wil6210_disconnect(vif, NULL, WLAN_REASON_DEAUTH_LEAVING);
mutex_unlock(&wil->mutex);
ndev = vif_to_ndev(vif);
/* during unregister_netdevice cfg80211_leave may perform operations
* such as stop AP, disconnect, so we only clear the VIF afterwards
*/
cfg80211_unregister_netdevice(ndev);
if (any_active && vif->mid != 0)
wmi_port_delete(wil, vif->mid);
/* make sure no one is accessing the VIF before removing */
mutex_lock(&wil->vif_mutex);
wil->vifs[mid] = NULL;
/* ensure NAPI code will see the NULL VIF */
wmb();
if (test_bit(wil_status_napi_en, wil->status)) {
napi_synchronize(&wil->napi_rx);
napi_synchronize(&wil->napi_tx);
}
mutex_unlock(&wil->vif_mutex);
flush_work(&wil->wmi_event_worker);
del_timer_sync(&vif->connect_timer);
cancel_work_sync(&vif->disconnect_worker);
wil_probe_client_flush(vif);
cancel_work_sync(&vif->probe_client_worker);
cancel_work_sync(&vif->enable_tx_key_worker);
/* For VIFs, the ndev will be freed by the destructor after RTNL is
 * unlocked. The main interface will be freed in wil_if_free; we need
 * to keep it a bit longer so logging macros will work.
 */
}
void wil_if_remove(struct wil6210_priv *wil)
{
struct net_device *ndev = wil->main_ndev;
struct wireless_dev *wdev = ndev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
wil_dbg_misc(wil, "if_remove\n");
rtnl_lock();
wiphy_lock(wiphy);
wil_vif_remove(wil, 0);
wiphy_unlock(wiphy);
rtnl_unlock();
netif_napi_del(&wil->napi_tx);
netif_napi_del(&wil->napi_rx);
wiphy_unregister(wiphy);
}
|
linux-master
|
drivers/net/wireless/ath/wil6210/netdev.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
|
linux-master
|
drivers/net/wireless/ath/ath12k/trace.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/elf.h>
#include "qmi.h"
#include "core.h"
#include "debug.h"
#include <linux/of.h>
#include <linux/firmware.h>
#define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02
#define HOST_CSTATE_BIT 0x04
#define PLATFORM_CAP_PCIE_GLOBAL_RESET 0x08
#define ATH12K_QMI_MAX_CHUNK_SIZE 2097152
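/* The qmi_elem_info tables below drive the kernel's generic QMI TLV
 * encoder/decoder: each entry binds one struct member (via offsetof)
 * to a wire TLV (tlv_type), QMI_OPT_FLAG entries gate the optional
 * TLV that follows them, and QMI_EOTI terminates the array. As a
 * minimal sketch, a hypothetical message with a single optional u32
 * field would be described like this (struct and array names are
 * invented for illustration):
 *
 * struct example_req_msg {
 *	u8 value_valid;
 *	u32 value;
 * };
 *
 * static struct qmi_elem_info example_req_msg_ei[] = {
 *	{
 *		.data_type = QMI_OPT_FLAG,
 *		.elem_len = 1,
 *		.elem_size = sizeof(u8),
 *		.array_type = NO_ARRAY,
 *		.tlv_type = 0x10,
 *		.offset = offsetof(struct example_req_msg, value_valid),
 *	},
 *	{
 *		.data_type = QMI_UNSIGNED_4_BYTE,
 *		.elem_len = 1,
 *		.elem_size = sizeof(u32),
 *		.array_type = NO_ARRAY,
 *		.tlv_type = 0x10,
 *		.offset = offsetof(struct example_req_msg, value),
 *	},
 *	{
 *		.data_type = QMI_EOTI,
 *		.array_type = NO_ARRAY,
 *		.tlv_type = QMI_COMMON_TLV_TYPE,
 *	},
 * };
 */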
static struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_host_mlo_chip_info_s_v01,
chip_id),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_host_mlo_chip_info_s_v01,
num_local_links),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01,
.elem_size = sizeof(u8),
.array_type = STATIC_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_host_mlo_chip_info_s_v01,
hw_link_id),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01,
.elem_size = sizeof(u8),
.array_type = STATIC_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_host_mlo_chip_info_s_v01,
valid_mlo_link_id),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
num_clients_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
num_clients),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
wake_msi_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
wake_msi),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
gpios_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
gpios_len),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = QMI_WLFW_MAX_NUM_GPIO_V01,
.elem_size = sizeof(u32),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
gpios),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
nm_modem_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
nm_modem),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
bdf_support_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
bdf_support),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
bdf_cache_support_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
bdf_cache_support),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x16,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
m3_support_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x16,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
m3_support),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x17,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
m3_cache_support_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x17,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
m3_cache_support),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x18,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
cal_filesys_support_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x18,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
cal_filesys_support),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x19,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
cal_cache_support_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x19,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
cal_cache_support),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1A,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
cal_done_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1A,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
cal_done),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1B,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
mem_bucket_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x1B,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
mem_bucket),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1C,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
mem_cfg_mode_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1C,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
mem_cfg_mode),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1D,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
cal_duration_valid),
},
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0x1D,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
cal_duraiton),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1E,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
platform_name_valid),
},
{
.data_type = QMI_STRING,
.elem_len = QMI_WLANFW_MAX_PLATFORM_NAME_LEN_V01 + 1,
.elem_size = sizeof(char),
.array_type = NO_ARRAY,
.tlv_type = 0x1E,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
platform_name),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1F,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
ddr_range_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = QMI_WLANFW_MAX_HOST_DDR_RANGE_SIZE_V01,
.elem_size = sizeof(struct qmi_wlanfw_host_ddr_range),
.array_type = STATIC_ARRAY,
.tlv_type = 0x1F,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
ddr_range),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x20,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
host_build_type_valid),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum qmi_wlanfw_host_build_type),
.array_type = NO_ARRAY,
.tlv_type = 0x20,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
host_build_type),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x21,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
mlo_capable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x21,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
mlo_capable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x22,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
mlo_chip_id_valid),
},
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0x22,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
mlo_chip_id),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x23,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
mlo_group_id_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x23,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
mlo_group_id),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x24,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
max_mlo_peer_valid),
},
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0x24,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
max_mlo_peer),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x25,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
mlo_num_chips_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x25,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
mlo_num_chips),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x26,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
mlo_chip_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = QMI_WLFW_MAX_NUM_MLO_CHIPS_V01,
.elem_size = sizeof(struct wlfw_host_mlo_chip_info_s_v01),
.array_type = STATIC_ARRAY,
.tlv_type = 0x26,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
mlo_chip_info),
.ei_array = wlfw_host_mlo_chip_info_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x27,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
feature_list_valid),
},
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0x27,
.offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
feature_list),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_wlanfw_host_cap_resp_msg_v01, resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
fw_ready_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
fw_ready_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
initiate_cal_download_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
initiate_cal_download_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
initiate_cal_update_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
initiate_cal_update_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
msa_ready_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
msa_ready_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
pin_connect_result_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
pin_connect_result_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
client_id_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
client_id),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x16,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
request_mem_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x16,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
request_mem_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x17,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
fw_mem_ready_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x17,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
fw_mem_ready_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x18,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
fw_init_done_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x18,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
fw_init_done_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x19,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
rejuvenate_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x19,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
rejuvenate_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1A,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
xo_cal_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1A,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
xo_cal_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1B,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
cal_done_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1B,
.offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
cal_done_enable),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
fw_status_valid),
},
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
fw_status),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, offset),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, size),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, secure_flag),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01,
size),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, type),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = QMI_WLANFW_MAX_NUM_MEM_CFG_V01,
.elem_size = sizeof(struct qmi_wlanfw_mem_cfg_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg),
.ei_array = qmi_wlanfw_mem_cfg_s_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
mem_seg_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
.elem_size = sizeof(struct qmi_wlanfw_mem_seg_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
mem_seg),
.ei_array = qmi_wlanfw_mem_seg_s_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, addr),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, size),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, type),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, restore),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
mem_seg_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
.elem_size = sizeof(struct qmi_wlanfw_mem_seg_resp_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
mem_seg),
.ei_array = qmi_wlanfw_mem_seg_resp_s_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_wlanfw_respond_mem_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
chip_id),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
chip_family),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_rf_board_info_s_v01,
board_id),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_soc_info_s_v01, soc_id),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_dev_mem_info_s_v01,
start),
},
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_dev_mem_info_s_v01,
size),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
fw_version),
},
{
.data_type = QMI_STRING,
.elem_len = ATH12K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1,
.elem_size = sizeof(char),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
fw_build_timestamp),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
chip_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_wlanfw_rf_chip_info_s_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
chip_info),
.ei_array = qmi_wlanfw_rf_chip_info_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
board_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_wlanfw_rf_board_info_s_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
board_info),
.ei_array = qmi_wlanfw_rf_board_info_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
soc_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_wlanfw_soc_info_s_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
soc_info),
.ei_array = qmi_wlanfw_soc_info_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
fw_version_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_wlanfw_fw_version_info_s_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
fw_version_info),
.ei_array = qmi_wlanfw_fw_version_info_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
fw_build_id_valid),
},
{
.data_type = QMI_STRING,
.elem_len = ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1,
.elem_size = sizeof(char),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
fw_build_id),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
num_macs_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
num_macs),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x16,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
voltage_mv_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x16,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
voltage_mv),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x17,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
time_freq_hz_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x17,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
time_freq_hz),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x18,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
otp_version_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x18,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
otp_version),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x19,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
eeprom_caldata_read_timeout_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x19,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
eeprom_caldata_read_timeout),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1A,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
fw_caps_valid),
},
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0x1A,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, fw_caps),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1B,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
rd_card_chain_cap_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x1B,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
rd_card_chain_cap),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1C,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
dev_mem_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01,
.elem_size = sizeof(struct qmi_wlanfw_dev_mem_info_s_v01),
.array_type = STATIC_ARRAY,
.tlv_type = 0x1C,
.offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, dev_mem),
.ei_array = qmi_wlanfw_dev_mem_info_s_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
valid),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
file_id_valid),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum qmi_wlanfw_cal_temp_id_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
file_id),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
total_size_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
total_size),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
seg_id_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
seg_id),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
data_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
data_len),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = QMI_WLANFW_MAX_DATA_SIZE_V01,
.elem_size = sizeof(u8),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
data),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
end_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
end),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
bdf_type_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
bdf_type),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_wlanfw_bdf_download_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, addr),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, size),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_wlanfw_m3_info_resp_msg_v01, resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
pipe_num),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
pipe_dir),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
nentries),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
nbytes_max),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
flags),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
service_id),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
pipe_dir),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
pipe_num),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, id),
},
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01,
offset),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct qmi_wlanfw_shadow_reg_v3_cfg_s_v01,
addr),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
mode),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
hw_debug_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
hw_debug),
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_wlanfw_wlan_mode_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
host_version_valid),
},
{
.data_type = QMI_STRING,
.elem_len = QMI_WLANFW_MAX_STR_LEN_V01 + 1,
.elem_size = sizeof(char),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
host_version),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
tgt_cfg_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
tgt_cfg_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = QMI_WLANFW_MAX_NUM_CE_V01,
.elem_size = sizeof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
tgt_cfg),
.ei_array = qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
svc_cfg_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
svc_cfg_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = QMI_WLANFW_MAX_NUM_SVC_V01,
.elem_size = sizeof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
svc_cfg),
.ei_array = qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
shadow_reg_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
shadow_reg_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V01,
.elem_size = sizeof(struct qmi_wlanfw_shadow_reg_cfg_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
shadow_reg),
.ei_array = qmi_wlanfw_shadow_reg_cfg_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x17,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
shadow_reg_v3_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x17,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
shadow_reg_v3_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V3_V01,
.elem_size = sizeof(struct qmi_wlanfw_shadow_reg_v3_cfg_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x17,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
shadow_reg_v3),
.ei_array = qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_wlanfw_wlan_cfg_resp_msg_v01, resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
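/* Overview of the host-side QMI bring-up sequence implemented by the
* helpers below (driven by ath12k_qmi_driver_event_work()):
*
*   server arrive -> indication register -> host capabilities
*   -> firmware requests memory -> host allocates and responds
*   -> firmware memory ready -> target caps, regdb/BDF/caldata, M3 info
*   -> firmware ready -> WLAN config + WLAN mode on
*/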
static void ath12k_host_cap_parse_mlo(struct qmi_wlanfw_host_cap_req_msg_v01 *req)
{
req->mlo_capable_valid = 1;
req->mlo_capable = 1;
req->mlo_chip_id_valid = 1;
req->mlo_chip_id = 0;
req->mlo_group_id_valid = 1;
req->mlo_group_id = 0;
req->max_mlo_peer_valid = 1;
/* The maximum peer number generally does not change for a given device,
* but it must be kept in sync with the host driver.
*/
req->max_mlo_peer = 32;
req->mlo_num_chips_valid = 1;
req->mlo_num_chips = 1;
req->mlo_chip_info_valid = 1;
req->mlo_chip_info[0].chip_id = 0;
req->mlo_chip_info[0].num_local_links = 2;
req->mlo_chip_info[0].hw_link_id[0] = 0;
req->mlo_chip_info[0].hw_link_id[1] = 1;
req->mlo_chip_info[0].valid_mlo_link_id[0] = 1;
req->mlo_chip_info[0].valid_mlo_link_id[1] = 1;
}
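/* Advertise host capabilities (number of clients, memory configuration
* mode, BDF/M3 support, calibration state, sleep clock selection and the
* MLO parameters above) to the firmware; this is the first request sent
* after indication registration.
*/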
static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
{
struct qmi_wlanfw_host_cap_req_msg_v01 req;
struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
struct qmi_txn txn = {};
int ret = 0;
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
req.num_clients_valid = 1;
req.num_clients = 1;
req.mem_cfg_mode = ab->qmi.target_mem_mode;
req.mem_cfg_mode_valid = 1;
req.bdf_support_valid = 1;
req.bdf_support = 1;
req.m3_support_valid = 1;
req.m3_support = 1;
req.m3_cache_support_valid = 1;
req.m3_cache_support = 1;
req.cal_done_valid = 1;
req.cal_done = ab->qmi.cal_done;
if (ab->hw_params->qmi_cnss_feature_bitmap) {
req.feature_list_valid = 1;
req.feature_list = ab->hw_params->qmi_cnss_feature_bitmap;
}
/* BRINGUP: we are piggybacking a lot of stuff on internal_sleep_clock
* here; should it be split?
*/
if (ab->hw_params->internal_sleep_clock) {
req.nm_modem_valid = 1;
/* Notify firmware that this is a non-Qualcomm platform. */
req.nm_modem |= HOST_CSTATE_BIT;
/* Notify firmware about the sleep clock selection; nm_modem bit[1] is
* used for this purpose. The host driver on non-Qualcomm platforms
* should select the internal sleep clock.
*/
req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT;
req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;
ath12k_host_cap_parse_mlo(&req);
}
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
if (ret < 0)
goto out;
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_HOST_CAP_REQ_V01,
QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_host_cap_req_msg_v01_ei, &req);
if (ret < 0) {
ath12k_warn(ab, "Failed to send host capability request,err = %d\n", ret);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0)
goto out;
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
ath12k_warn(ab, "Host capability request failed, result: %d, err: %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out;
}
out:
return ret;
}
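/* Register for the firmware indications (fw ready, request mem, fw mem
* ready, cal done, fw init done) that drive the rest of the bring-up;
* the pin connect result indication is explicitly left disabled.
*/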
static int ath12k_qmi_fw_ind_register_send(struct ath12k_base *ab)
{
struct qmi_wlanfw_ind_register_req_msg_v01 *req;
struct qmi_wlanfw_ind_register_resp_msg_v01 *resp;
struct qmi_handle *handle = &ab->qmi.handle;
struct qmi_txn txn;
int ret;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
resp = kzalloc(sizeof(*resp), GFP_KERNEL);
if (!resp) {
ret = -ENOMEM;
goto resp_out;
}
req->client_id_valid = 1;
req->client_id = QMI_WLANFW_CLIENT_ID;
req->fw_ready_enable_valid = 1;
req->fw_ready_enable = 1;
req->request_mem_enable_valid = 1;
req->request_mem_enable = 1;
req->fw_mem_ready_enable_valid = 1;
req->fw_mem_ready_enable = 1;
req->cal_done_enable_valid = 1;
req->cal_done_enable = 1;
req->fw_init_done_enable_valid = 1;
req->fw_init_done_enable = 1;
req->pin_connect_result_enable_valid = 0;
req->pin_connect_result_enable = 0;
ret = qmi_txn_init(handle, &txn,
qmi_wlanfw_ind_register_resp_msg_v01_ei, resp);
if (ret < 0)
goto out;
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_IND_REGISTER_REQ_V01,
QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_ind_register_req_msg_v01_ei, req);
if (ret < 0) {
ath12k_warn(ab, "Failed to send indication register request, err = %d\n",
ret);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0) {
ath12k_warn(ab, "failed to register fw indication %d\n", ret);
goto out;
}
if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
ath12k_warn(ab, "FW Ind register request failed, result: %d, err: %d\n",
resp->resp.result, resp->resp.error);
ret = -EINVAL;
goto out;
}
out:
kfree(resp);
resp_out:
kfree(req);
return ret;
}
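/* Answer the firmware's memory request with the DMA chunks allocated by
* ath12k_qmi_alloc_target_mem_chunk(). When allocation was deferred
* (target_mem_delayed), an empty response is sent and the firmware is
* expected to re-request smaller chunks.
*/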
static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
{
struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
struct qmi_txn txn = {};
int ret = 0, i;
bool delayed;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
memset(&resp, 0, sizeof(resp));
/* Some targets by default request one big block of contiguous DMA
* memory, which is hard to allocate in the kernel. In that case the
* host returns a failure to the firmware, and the firmware then
* requests multiple smaller chunks instead.
*/
if (ab->qmi.target_mem_delayed) {
delayed = true;
ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi delays mem_request %d\n",
ab->qmi.mem_seg_count);
memset(req, 0, sizeof(*req));
} else {
delayed = false;
req->mem_seg_len = ab->qmi.mem_seg_count;
for (i = 0; i < req->mem_seg_len; i++) {
req->mem_seg[i].addr = ab->qmi.target_mem[i].paddr;
req->mem_seg[i].size = ab->qmi.target_mem[i].size;
req->mem_seg[i].type = ab->qmi.target_mem[i].type;
ath12k_dbg(ab, ATH12K_DBG_QMI,
"qmi req mem_seg[%d] %pad %u %u\n", i,
&ab->qmi.target_mem[i].paddr,
ab->qmi.target_mem[i].size,
ab->qmi.target_mem[i].type);
}
}
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_respond_mem_resp_msg_v01_ei, &resp);
if (ret < 0)
goto out;
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_RESPOND_MEM_REQ_V01,
QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_respond_mem_req_msg_v01_ei, req);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to respond memory request, err = %d\n",
ret);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0) {
ath12k_warn(ab, "qmi failed memory request, err = %d\n", ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
/* The error response is expected when target_mem_delayed is true. */
if (delayed && resp.resp.error == 0)
goto out;
ath12k_warn(ab, "Respond mem req failed, result: %d, err: %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out;
}
out:
kfree(req);
return ret;
}
static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab)
{
int i;
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
if (!ab->qmi.target_mem[i].v.addr)
continue;
dma_free_coherent(ab->dev,
ab->qmi.target_mem[i].size,
ab->qmi.target_mem[i].v.addr,
ab->qmi.target_mem[i].paddr);
ab->qmi.target_mem[i].v.addr = NULL;
}
}
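/* Allocate coherent DMA memory for each segment the firmware asked for.
* A failed allocation larger than ATH12K_QMI_MAX_CHUNK_SIZE is not
* fatal: everything is freed, target_mem_delayed is set and the firmware
* retries with smaller chunks.
*/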
static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
{
int i;
struct target_mem_chunk *chunk;
ab->qmi.target_mem_delayed = false;
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
chunk = &ab->qmi.target_mem[i];
/* Allocate memory for each region whose functionality is supported on
* the host. For an unsupported memory region the host allocates
* nothing and assigns NULL; firmware handles this without crashing.
*/
switch (chunk->type) {
case HOST_DDR_REGION_TYPE:
case M3_DUMP_REGION_TYPE:
case PAGEABLE_MEM_REGION_TYPE:
case CALDB_MEM_REGION_TYPE:
chunk->v.addr = dma_alloc_coherent(ab->dev,
chunk->size,
&chunk->paddr,
GFP_KERNEL | __GFP_NOWARN);
if (!chunk->v.addr) {
if (chunk->size > ATH12K_QMI_MAX_CHUNK_SIZE) {
ab->qmi.target_mem_delayed = true;
ath12k_warn(ab,
"qmi dma allocation failed (%d B type %u), will try later with small size\n",
chunk->size,
chunk->type);
ath12k_qmi_free_target_mem_chunk(ab);
return 0;
}
ath12k_warn(ab, "memory allocation failure for %u size: %d\n",
chunk->type, chunk->size);
return -ENOMEM;
}
break;
default:
ath12k_warn(ab, "memory type %u not supported\n",
chunk->type);
chunk->paddr = 0;
chunk->v.addr = NULL;
break;
}
}
return 0;
}
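/* Query target capabilities (chip/board/SoC IDs, firmware version,
* optional device memory ranges and EEPROM caldata support) and cache
* them in ab->qmi.target for board file selection later on.
*/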
static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
{
struct qmi_wlanfw_cap_req_msg_v01 req;
struct qmi_wlanfw_cap_resp_msg_v01 resp;
struct qmi_txn txn = {};
unsigned int board_id = ATH12K_BOARD_ID_DEFAULT;
int ret = 0;
int i;
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_cap_resp_msg_v01_ei, &resp);
if (ret < 0)
goto out;
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_CAP_REQ_V01,
QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_cap_req_msg_v01_ei, &req);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send target cap request, err = %d\n",
ret);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0) {
ath12k_warn(ab, "qmi failed target cap request %d\n", ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
ath12k_warn(ab, "qmi targetcap req failed, result: %d, err: %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out;
}
if (resp.chip_info_valid) {
ab->qmi.target.chip_id = resp.chip_info.chip_id;
ab->qmi.target.chip_family = resp.chip_info.chip_family;
}
if (resp.board_info_valid)
ab->qmi.target.board_id = resp.board_info.board_id;
else
ab->qmi.target.board_id = board_id;
if (resp.soc_info_valid)
ab->qmi.target.soc_id = resp.soc_info.soc_id;
if (resp.fw_version_info_valid) {
ab->qmi.target.fw_version = resp.fw_version_info.fw_version;
strscpy(ab->qmi.target.fw_build_timestamp,
resp.fw_version_info.fw_build_timestamp,
sizeof(ab->qmi.target.fw_build_timestamp));
}
if (resp.fw_build_id_valid)
strscpy(ab->qmi.target.fw_build_id, resp.fw_build_id,
sizeof(ab->qmi.target.fw_build_id));
if (resp.dev_mem_info_valid) {
for (i = 0; i < ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01; i++) {
ab->qmi.dev_mem[i].start =
resp.dev_mem[i].start;
ab->qmi.dev_mem[i].size =
resp.dev_mem[i].size;
ath12k_dbg(ab, ATH12K_DBG_QMI,
"devmem [%d] start ox%llx size %llu\n", i,
ab->qmi.dev_mem[i].start,
ab->qmi.dev_mem[i].size);
}
}
if (resp.eeprom_caldata_read_timeout_valid) {
ab->qmi.target.eeprom_caldata = resp.eeprom_caldata_read_timeout;
ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi cal data supported from eeprom\n");
}
ath12k_info(ab, "chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
ab->qmi.target.chip_id, ab->qmi.target.chip_family,
ab->qmi.target.board_id, ab->qmi.target.soc_id);
ath12k_info(ab, "fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
ab->qmi.target.fw_version,
ab->qmi.target.fw_build_timestamp,
ab->qmi.target.fw_build_id);
out:
return ret;
}
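/* Stream a file into target memory in segments of at most
* QMI_WLANFW_MAX_DATA_SIZE_V01 bytes; seg_id increments per segment and
* 'end' marks the final one. For the EEPROM file type only the metadata
* is sent, since the calibration data is read from EEPROM on the target
* side.
*/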
static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab,
const u8 *data, u32 len, u8 type)
{
struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
struct qmi_txn txn = {};
const u8 *temp = data;
int ret;
u32 remaining = len;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
memset(&resp, 0, sizeof(resp));
while (remaining) {
req->valid = 1;
req->file_id_valid = 1;
req->file_id = ab->qmi.target.board_id;
req->total_size_valid = 1;
req->total_size = remaining;
req->seg_id_valid = 1;
req->data_valid = 1;
req->bdf_type = type;
req->bdf_type_valid = 1;
req->end_valid = 1;
req->end = 0;
if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) {
req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01;
} else {
req->data_len = remaining;
req->end = 1;
}
if (type == ATH12K_QMI_FILE_TYPE_EEPROM) {
req->data_valid = 0;
req->end = 1;
req->data_len = ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE;
} else {
memcpy(req->data, temp, req->data_len);
}
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_bdf_download_resp_msg_v01_ei,
&resp);
if (ret < 0)
goto out;
ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi bdf download req fixed addr type %d\n",
type);
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_BDF_DOWNLOAD_REQ_V01,
QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_bdf_download_req_msg_v01_ei, req);
if (ret < 0) {
qmi_txn_cancel(&txn);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0)
goto out;
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
ath12k_warn(ab, "qmi BDF download failed, result: %d, err: %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out;
}
if (type == ATH12K_QMI_FILE_TYPE_EEPROM) {
remaining = 0;
} else {
remaining -= req->data_len;
temp += req->data_len;
req->seg_id++;
ath12k_dbg(ab, ATH12K_DBG_QMI,
"qmi bdf download request remaining %i\n",
remaining);
}
}
out:
kfree(req);
return ret;
}
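/* Fetch the requested board file (ELF or flattened BDF, regdb or
* calibration data) from the filesystem and push it to the target via
* ath12k_qmi_load_file_target_mem().
*/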
static int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
enum ath12k_qmi_bdf_type type)
{
struct device *dev = ab->dev;
char filename[ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE];
const struct firmware *fw_entry;
struct ath12k_board_data bd;
u32 fw_size, file_type;
int ret = 0;
const u8 *tmp;
memset(&bd, 0, sizeof(bd));
switch (type) {
case ATH12K_QMI_BDF_TYPE_ELF:
ret = ath12k_core_fetch_bdf(ab, &bd);
if (ret) {
ath12k_warn(ab, "qmi failed to load bdf:\n");
goto out;
}
if (bd.len >= SELFMAG && memcmp(bd.data, ELFMAG, SELFMAG) == 0)
type = ATH12K_QMI_BDF_TYPE_ELF;
else
type = ATH12K_QMI_BDF_TYPE_BIN;
break;
case ATH12K_QMI_BDF_TYPE_REGDB:
ret = ath12k_core_fetch_board_data_api_1(ab, &bd,
ATH12K_REGDB_FILE_NAME);
if (ret) {
ath12k_warn(ab, "qmi failed to load regdb bin:\n");
goto out;
}
break;
case ATH12K_QMI_BDF_TYPE_CALIBRATION:
if (ab->qmi.target.eeprom_caldata) {
file_type = ATH12K_QMI_FILE_TYPE_EEPROM;
tmp = filename;
fw_size = ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE;
} else {
file_type = ATH12K_QMI_FILE_TYPE_CALDATA;
/* cal-<bus>-<id>.bin */
snprintf(filename, sizeof(filename), "cal-%s-%s.bin",
ath12k_bus_str(ab->hif.bus), dev_name(dev));
fw_entry = ath12k_core_firmware_request(ab, filename);
if (!IS_ERR(fw_entry))
goto success;
fw_entry = ath12k_core_firmware_request(ab,
ATH12K_DEFAULT_CAL_FILE);
if (IS_ERR(fw_entry)) {
ret = PTR_ERR(fw_entry);
ath12k_warn(ab,
"qmi failed to load CAL data file:%s\n",
filename);
goto out;
}
success:
fw_size = min_t(u32, ab->hw_params->fw.board_size,
fw_entry->size);
tmp = fw_entry->data;
}
ret = ath12k_qmi_load_file_target_mem(ab, tmp, fw_size, file_type);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to load caldata\n");
goto out_qmi_cal;
}
ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi caldata downloaded: type: %u\n",
file_type);
out_qmi_cal:
if (!ab->qmi.target.eeprom_caldata)
release_firmware(fw_entry);
return ret;
default:
ath12k_warn(ab, "unknown file type for load %d", type);
goto out;
}
ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi bdf_type %d\n", type);
fw_size = min_t(u32, ab->hw_params->fw.board_size, bd.len);
ret = ath12k_qmi_load_file_target_mem(ab, bd.data, fw_size, type);
if (ret < 0)
ath12k_warn(ab, "qmi failed to load bdf file\n");
out:
ath12k_core_free_bdf(ab, &bd);
ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi BDF download sequence completed\n");
return ret;
}
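/* Load the M3 firmware image into a coherent DMA buffer; its physical
* address and size are handed to the target by the M3 info request
* below. The buffer is kept across restarts until ath12k_qmi_m3_free().
*/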
static int ath12k_qmi_m3_load(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
const struct firmware *fw;
char path[100];
int ret;
if (m3_mem->vaddr || m3_mem->size)
return 0;
fw = ath12k_core_firmware_request(ab, ATH12K_M3_FILE);
if (IS_ERR(fw)) {
ret = PTR_ERR(fw);
ath12k_core_create_firmware_path(ab, ATH12K_M3_FILE,
path, sizeof(path));
ath12k_err(ab, "failed to load %s: %d\n", path, ret);
return ret;
}
m3_mem->vaddr = dma_alloc_coherent(ab->dev,
fw->size, &m3_mem->paddr,
GFP_KERNEL);
if (!m3_mem->vaddr) {
ath12k_err(ab, "failed to allocate memory for M3 with size %zu\n",
fw->size);
release_firmware(fw);
return -ENOMEM;
}
memcpy(m3_mem->vaddr, fw->data, fw->size);
m3_mem->size = fw->size;
release_firmware(fw);
return 0;
}
static void ath12k_qmi_m3_free(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
if (!m3_mem->vaddr)
return;
dma_free_coherent(ab->dev, m3_mem->size,
m3_mem->vaddr, m3_mem->paddr);
m3_mem->vaddr = NULL;
}
static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
struct qmi_wlanfw_m3_info_req_msg_v01 req;
struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
struct qmi_txn txn = {};
int ret = 0;
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
ret = ath12k_qmi_m3_load(ab);
if (ret) {
ath12k_err(ab, "failed to load m3 firmware: %d", ret);
return ret;
}
req.addr = m3_mem->paddr;
req.size = m3_mem->size;
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_m3_info_resp_msg_v01_ei, &resp);
if (ret < 0)
goto out;
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_M3_INFO_REQ_V01,
QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
qmi_wlanfw_m3_info_req_msg_v01_ei, &req);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send M3 information request, err = %d\n",
ret);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0) {
ath12k_warn(ab, "qmi failed M3 information request %d\n", ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
ath12k_warn(ab, "qmi M3 info request failed, result: %d, err: %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out;
}
out:
return ret;
}
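/* Switch the firmware WLAN mode (e.g. mission mode on start, OFF on
* stop). A disconnected WLFW service while switching to OFF is treated
* as success since the firmware is already gone.
*/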
static int ath12k_qmi_wlanfw_mode_send(struct ath12k_base *ab,
u32 mode)
{
struct qmi_wlanfw_wlan_mode_req_msg_v01 req;
struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp;
struct qmi_txn txn = {};
int ret = 0;
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
req.mode = mode;
req.hw_debug_valid = 1;
req.hw_debug = 0;
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_wlan_mode_resp_msg_v01_ei, &resp);
if (ret < 0)
goto out;
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_WLAN_MODE_REQ_V01,
QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send mode request, mode: %d, err = %d\n",
mode, ret);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0) {
if (mode == ATH12K_FIRMWARE_MODE_OFF && ret == -ENETRESET) {
ath12k_warn(ab, "WLFW service is dis-connected\n");
return 0;
}
ath12k_warn(ab, "qmi failed set mode request, mode: %d, err = %d\n",
mode, ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
ath12k_warn(ab, "Mode request failed, mode: %d, result: %d err: %d\n",
mode, resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out;
}
out:
return ret;
}
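/* Send the WLAN configuration: host version string, the copy engine
* (CE) pipe setup, the service-to-CE map and, when the hardware supports
* it, the shadow register v3 addresses.
*/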
static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab)
{
struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req;
struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp;
struct ce_pipe_config *ce_cfg;
struct service_to_pipe *svc_cfg;
struct qmi_txn txn = {};
int ret = 0, pipe_num;
ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce;
svc_cfg = (struct service_to_pipe *)ab->qmi.ce_cfg.svc_to_ce_map;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
memset(&resp, 0, sizeof(resp));
req->host_version_valid = 1;
strscpy(req->host_version, ATH12K_HOST_VERSION_STRING,
sizeof(req->host_version));
req->tgt_cfg_valid = 1;
/* This is the number of CE configs */
req->tgt_cfg_len = ab->qmi.ce_cfg.tgt_ce_len;
for (pipe_num = 0; pipe_num < req->tgt_cfg_len; pipe_num++) {
req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum;
req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir;
req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries;
req->tgt_cfg[pipe_num].nbytes_max = ce_cfg[pipe_num].nbytes_max;
req->tgt_cfg[pipe_num].flags = ce_cfg[pipe_num].flags;
}
req->svc_cfg_valid = 1;
/* This is the number of Service/CE configs */
req->svc_cfg_len = ab->qmi.ce_cfg.svc_to_ce_map_len;
for (pipe_num = 0; pipe_num < req->svc_cfg_len; pipe_num++) {
req->svc_cfg[pipe_num].service_id = svc_cfg[pipe_num].service_id;
req->svc_cfg[pipe_num].pipe_dir = svc_cfg[pipe_num].pipedir;
req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum;
}
/* set shadow v3 configuration */
if (ab->hw_params->supports_shadow_regs) {
req->shadow_reg_v3_valid = 1;
req->shadow_reg_v3_len = min_t(u32,
ab->qmi.ce_cfg.shadow_reg_v3_len,
QMI_WLANFW_MAX_NUM_SHADOW_REG_V3_V01);
memcpy(&req->shadow_reg_v3, ab->qmi.ce_cfg.shadow_reg_v3,
sizeof(u32) * req->shadow_reg_v3_len);
} else {
req->shadow_reg_v3_valid = 0;
}
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_wlan_cfg_resp_msg_v01_ei, &resp);
if (ret < 0)
goto out;
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_WLAN_CFG_REQ_V01,
QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send wlan config request, err = %d\n",
ret);
goto out;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
if (ret < 0) {
ath12k_warn(ab, "qmi failed wlan config request, err = %d\n", ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
ath12k_warn(ab, "qmi wlan config request failed, result: %d, err: %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
goto out;
}
out:
kfree(req);
return ret;
}
void ath12k_qmi_firmware_stop(struct ath12k_base *ab)
{
int ret;
ret = ath12k_qmi_wlanfw_mode_send(ab, ATH12K_FIRMWARE_MODE_OFF);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send wlan mode off\n");
return;
}
}
int ath12k_qmi_firmware_start(struct ath12k_base *ab,
u32 mode)
{
int ret;
ret = ath12k_qmi_wlanfw_wlan_cfg_send(ab);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send wlan cfg:%d\n", ret);
return ret;
}
ret = ath12k_qmi_wlanfw_mode_send(ab, mode);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send wlan fw mode:%d\n", ret);
return ret;
}
return 0;
}
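/* Queue a QMI driver event for the ordered event workqueue. This runs in
* QMI callback context, hence the GFP_ATOMIC allocation and the plain
* spinlock around the event list.
*/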
static int
ath12k_qmi_driver_event_post(struct ath12k_qmi *qmi,
enum ath12k_qmi_event_type type,
void *data)
{
struct ath12k_qmi_driver_event *event;
event = kzalloc(sizeof(*event), GFP_ATOMIC);
if (!event)
return -ENOMEM;
event->type = type;
event->data = data;
spin_lock(&qmi->event_lock);
list_add_tail(&event->list, &qmi->event_list);
spin_unlock(&qmi->event_lock);
queue_work(qmi->event_wq, &qmi->event_work);
return 0;
}
static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
{
struct ath12k_base *ab = qmi->ab;
int ret;
ret = ath12k_qmi_fw_ind_register_send(ab);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send FW indication QMI:%d\n", ret);
return ret;
}
ret = ath12k_qmi_host_cap_send(ab);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send host cap QMI:%d\n", ret);
return ret;
}
return ret;
}
static int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
{
struct ath12k_base *ab = qmi->ab;
int ret;
ret = ath12k_qmi_respond_fw_mem_request(ab);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to respond fw mem req:%d\n", ret);
return ret;
}
return ret;
}
static int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi)
{
struct ath12k_base *ab = qmi->ab;
int ret;
ret = ath12k_qmi_request_target_cap(ab);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to req target capabilities:%d\n", ret);
return ret;
}
ret = ath12k_qmi_load_bdf_qmi(ab, ATH12K_QMI_BDF_TYPE_REGDB);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to load regdb file:%d\n", ret);
return ret;
}
ret = ath12k_qmi_load_bdf_qmi(ab, ATH12K_QMI_BDF_TYPE_ELF);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to load board data file:%d\n", ret);
return ret;
}
if (ab->hw_params->download_calib) {
ret = ath12k_qmi_load_bdf_qmi(ab, ATH12K_QMI_BDF_TYPE_CALIBRATION);
if (ret < 0)
ath12k_warn(ab, "qmi failed to load calibrated data :%d\n", ret);
}
ret = ath12k_qmi_wlanfw_m3_info_send(ab);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send m3 info req:%d\n", ret);
return ret;
}
return ret;
}
static void ath12k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
struct sockaddr_qrtr *sq,
struct qmi_txn *txn,
const void *data)
{
struct ath12k_qmi *qmi = container_of(qmi_hdl, struct ath12k_qmi, handle);
struct ath12k_base *ab = qmi->ab;
const struct qmi_wlanfw_request_mem_ind_msg_v01 *msg = data;
int i, ret;
ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi firmware request memory request\n");
if (msg->mem_seg_len == 0 ||
msg->mem_seg_len > ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01) {
ath12k_warn(ab, "Invalid memory segment length: %u\n",
msg->mem_seg_len);
return;
}
ab->qmi.mem_seg_count = msg->mem_seg_len;
for (i = 0; i < qmi->mem_seg_count; i++) {
ab->qmi.target_mem[i].type = msg->mem_seg[i].type;
ab->qmi.target_mem[i].size = msg->mem_seg[i].size;
ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi mem seg type %d size %d\n",
msg->mem_seg[i].type, msg->mem_seg[i].size);
}
ret = ath12k_qmi_alloc_target_mem_chunk(ab);
if (ret) {
ath12k_warn(ab, "qmi failed to alloc target memory: %d\n",
ret);
return;
}
ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_REQUEST_MEM, NULL);
}
static void ath12k_qmi_msg_mem_ready_cb(struct qmi_handle *qmi_hdl,
struct sockaddr_qrtr *sq,
struct qmi_txn *txn,
const void *decoded)
{
struct ath12k_qmi *qmi = container_of(qmi_hdl, struct ath12k_qmi, handle);
struct ath12k_base *ab = qmi->ab;
ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi firmware memory ready indication\n");
ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_FW_MEM_READY, NULL);
}
static void ath12k_qmi_msg_fw_ready_cb(struct qmi_handle *qmi_hdl,
struct sockaddr_qrtr *sq,
struct qmi_txn *txn,
const void *decoded)
{
struct ath12k_qmi *qmi = container_of(qmi_hdl, struct ath12k_qmi, handle);
struct ath12k_base *ab = qmi->ab;
ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi firmware ready\n");
ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_FW_READY, NULL);
}
static const struct qmi_msg_handler ath12k_qmi_msg_handlers[] = {
{
.type = QMI_INDICATION,
.msg_id = QMI_WLFW_REQUEST_MEM_IND_V01,
.ei = qmi_wlanfw_request_mem_ind_msg_v01_ei,
.decoded_size = sizeof(struct qmi_wlanfw_request_mem_ind_msg_v01),
.fn = ath12k_qmi_msg_mem_request_cb,
},
{
.type = QMI_INDICATION,
.msg_id = QMI_WLFW_FW_MEM_READY_IND_V01,
.ei = qmi_wlanfw_mem_ready_ind_msg_v01_ei,
.decoded_size = sizeof(struct qmi_wlanfw_fw_mem_ready_ind_msg_v01),
.fn = ath12k_qmi_msg_mem_ready_cb,
},
{
.type = QMI_INDICATION,
.msg_id = QMI_WLFW_FW_READY_IND_V01,
.ei = qmi_wlanfw_fw_ready_ind_msg_v01_ei,
.decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01),
.fn = ath12k_qmi_msg_fw_ready_cb,
},
};
static int ath12k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
struct qmi_service *service)
{
struct ath12k_qmi *qmi = container_of(qmi_hdl, struct ath12k_qmi, handle);
struct ath12k_base *ab = qmi->ab;
struct sockaddr_qrtr *sq = &qmi->sq;
int ret;
sq->sq_family = AF_QIPCRTR;
sq->sq_node = service->node;
sq->sq_port = service->port;
ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)sq,
sizeof(*sq), 0);
if (ret) {
ath12k_warn(ab, "qmi failed to connect to remote service %d\n", ret);
return ret;
}
ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi wifi fw qmi service connected\n");
ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_SERVER_ARRIVE, NULL);
return ret;
}
static void ath12k_qmi_ops_del_server(struct qmi_handle *qmi_hdl,
struct qmi_service *service)
{
struct ath12k_qmi *qmi = container_of(qmi_hdl, struct ath12k_qmi, handle);
struct ath12k_base *ab = qmi->ab;
ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi wifi fw del server\n");
ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_SERVER_EXIT, NULL);
}
static const struct qmi_ops ath12k_qmi_ops = {
.new_server = ath12k_qmi_ops_new_server,
.del_server = ath12k_qmi_ops_del_server,
};
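/* Ordered worker draining qmi->event_list. The event lock is dropped
* while an event is being handled and re-taken before the next list
* access, so the handlers themselves are allowed to sleep.
*/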
static void ath12k_qmi_driver_event_work(struct work_struct *work)
{
struct ath12k_qmi *qmi = container_of(work, struct ath12k_qmi,
event_work);
struct ath12k_qmi_driver_event *event;
struct ath12k_base *ab = qmi->ab;
int ret;
spin_lock(&qmi->event_lock);
while (!list_empty(&qmi->event_list)) {
event = list_first_entry(&qmi->event_list,
struct ath12k_qmi_driver_event, list);
list_del(&event->list);
spin_unlock(&qmi->event_lock);
if (test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))
goto skip;
switch (event->type) {
case ATH12K_QMI_EVENT_SERVER_ARRIVE:
ret = ath12k_qmi_event_server_arrive(qmi);
if (ret < 0)
set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
break;
case ATH12K_QMI_EVENT_SERVER_EXIT:
set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
break;
case ATH12K_QMI_EVENT_REQUEST_MEM:
ret = ath12k_qmi_event_mem_request(qmi);
if (ret < 0)
set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
break;
case ATH12K_QMI_EVENT_FW_MEM_READY:
ret = ath12k_qmi_event_load_bdf(qmi);
if (ret < 0)
set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
break;
case ATH12K_QMI_EVENT_FW_READY:
clear_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
ath12k_hal_dump_srng_stats(ab);
queue_work(ab->workqueue, &ab->restart_work);
break;
}
clear_bit(ATH12K_FLAG_CRASH_FLUSH,
&ab->dev_flags);
clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
ath12k_core_qmi_firmware_ready(ab);
set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
break;
default:
ath12k_warn(ab, "invalid event type: %d", event->type);
break;
}
skip:
kfree(event);
spin_lock(&qmi->event_lock);
}
spin_unlock(&qmi->event_lock);
}
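/* Register the QMI handle, create the ordered event workqueue and start
* the lookup for the WLFW service; bring-up continues from
* ath12k_qmi_ops_new_server() once the service shows up.
*/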
int ath12k_qmi_init_service(struct ath12k_base *ab)
{
int ret;
memset(&ab->qmi.target, 0, sizeof(struct target_info));
memset(&ab->qmi.target_mem, 0, sizeof(struct target_mem_chunk));
ab->qmi.ab = ab;
ab->qmi.target_mem_mode = ATH12K_QMI_TARGET_MEM_MODE_DEFAULT;
ret = qmi_handle_init(&ab->qmi.handle, ATH12K_QMI_RESP_LEN_MAX,
&ath12k_qmi_ops, ath12k_qmi_msg_handlers);
if (ret < 0) {
ath12k_warn(ab, "failed to initialize qmi handle\n");
return ret;
}
ab->qmi.event_wq = alloc_ordered_workqueue("ath12k_qmi_driver_event", 0);
if (!ab->qmi.event_wq) {
ath12k_err(ab, "failed to allocate workqueue\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&ab->qmi.event_list);
spin_lock_init(&ab->qmi.event_lock);
INIT_WORK(&ab->qmi.event_work, ath12k_qmi_driver_event_work);
ret = qmi_add_lookup(&ab->qmi.handle, ATH12K_QMI_WLFW_SERVICE_ID_V01,
ATH12K_QMI_WLFW_SERVICE_VERS_V01,
ab->qmi.service_ins_id);
if (ret < 0) {
ath12k_warn(ab, "failed to add qmi lookup\n");
destroy_workqueue(ab->qmi.event_wq);
return ret;
}
return ret;
}
void ath12k_qmi_deinit_service(struct ath12k_base *ab)
{
qmi_handle_release(&ab->qmi.handle);
cancel_work_sync(&ab->qmi.event_work);
destroy_workqueue(ab->qmi.event_wq);
ath12k_qmi_m3_free(ab);
ath12k_qmi_free_target_mem_chunk(ab);
}
|
linux-master
|
drivers/net/wireless/ath/ath12k/qmi.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include "debug.h"
#include "core.h"
#include "ce.h"
#include "hw.h"
#include "mhi.h"
#include "dp_rx.h"
static u8 ath12k_hw_qcn9274_mac_from_pdev_id(int pdev_idx)
{
return pdev_idx;
}
static int ath12k_hw_mac_id_to_pdev_id_qcn9274(const struct ath12k_hw_params *hw,
int mac_id)
{
return mac_id;
}
static int ath12k_hw_mac_id_to_srng_id_qcn9274(const struct ath12k_hw_params *hw,
int mac_id)
{
return 0;
}
static u8 ath12k_hw_get_ring_selector_qcn9274(struct sk_buff *skb)
{
return smp_processor_id();
}
static bool ath12k_dp_srng_is_comp_ring_qcn9274(int ring_num)
{
if (ring_num < 3 || ring_num == 4)
return true;
return false;
}
static int ath12k_hw_mac_id_to_pdev_id_wcn7850(const struct ath12k_hw_params *hw,
int mac_id)
{
return 0;
}
static int ath12k_hw_mac_id_to_srng_id_wcn7850(const struct ath12k_hw_params *hw,
int mac_id)
{
return mac_id;
}
static u8 ath12k_hw_get_ring_selector_wcn7850(struct sk_buff *skb)
{
return skb_get_queue_mapping(skb);
}
static bool ath12k_dp_srng_is_comp_ring_wcn7850(int ring_num)
{
if (ring_num == 0 || ring_num == 2 || ring_num == 4)
return true;
return false;
}
static const struct ath12k_hw_ops qcn9274_ops = {
.get_hw_mac_from_pdev_id = ath12k_hw_qcn9274_mac_from_pdev_id,
.mac_id_to_pdev_id = ath12k_hw_mac_id_to_pdev_id_qcn9274,
.mac_id_to_srng_id = ath12k_hw_mac_id_to_srng_id_qcn9274,
.rxdma_ring_sel_config = ath12k_dp_rxdma_ring_sel_config_qcn9274,
.get_ring_selector = ath12k_hw_get_ring_selector_qcn9274,
.dp_srng_is_tx_comp_ring = ath12k_dp_srng_is_comp_ring_qcn9274,
};
static const struct ath12k_hw_ops wcn7850_ops = {
.get_hw_mac_from_pdev_id = ath12k_hw_qcn9274_mac_from_pdev_id,
.mac_id_to_pdev_id = ath12k_hw_mac_id_to_pdev_id_wcn7850,
.mac_id_to_srng_id = ath12k_hw_mac_id_to_srng_id_wcn7850,
.rxdma_ring_sel_config = ath12k_dp_rxdma_ring_sel_config_wcn7850,
.get_ring_selector = ath12k_hw_get_ring_selector_wcn7850,
.dp_srng_is_tx_comp_ring = ath12k_dp_srng_is_comp_ring_wcn7850,
};
#define ATH12K_TX_RING_MASK_0 0x1
#define ATH12K_TX_RING_MASK_1 0x2
#define ATH12K_TX_RING_MASK_2 0x4
#define ATH12K_TX_RING_MASK_3 0x8
#define ATH12K_TX_RING_MASK_4 0x10
#define ATH12K_RX_RING_MASK_0 0x1
#define ATH12K_RX_RING_MASK_1 0x2
#define ATH12K_RX_RING_MASK_2 0x4
#define ATH12K_RX_RING_MASK_3 0x8
#define ATH12K_RX_ERR_RING_MASK_0 0x1
#define ATH12K_RX_WBM_REL_RING_MASK_0 0x1
#define ATH12K_REO_STATUS_RING_MASK_0 0x1
#define ATH12K_HOST2RXDMA_RING_MASK_0 0x1
#define ATH12K_RX_MON_RING_MASK_0 0x1
#define ATH12K_RX_MON_RING_MASK_1 0x2
#define ATH12K_RX_MON_RING_MASK_2 0x4
#define ATH12K_TX_MON_RING_MASK_0 0x1
#define ATH12K_TX_MON_RING_MASK_1 0x2
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config ath12k_target_ce_config_wlan_qcn9274[] = {
/* CE0: host->target HTC control and raw streams */
{
.pipenum = __cpu_to_le32(0),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE1: target->host HTT + HTC control */
{
.pipenum = __cpu_to_le32(1),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE2: target->host WMI */
{
.pipenum = __cpu_to_le32(2),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE3: host->target WMI (mac0) */
{
.pipenum = __cpu_to_le32(3),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE4: host->target HTT */
{
.pipenum = __cpu_to_le32(4),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(256),
.nbytes_max = __cpu_to_le32(256),
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
.reserved = __cpu_to_le32(0),
},
/* CE5: target->host Pktlog */
{
.pipenum = __cpu_to_le32(5),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE6: Reserved for target autonomous hif_memcpy */
{
.pipenum = __cpu_to_le32(6),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(16384),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE7: host->target WMI (mac1) */
{
.pipenum = __cpu_to_le32(7),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE8: Reserved for target autonomous hif_memcpy */
{
.pipenum = __cpu_to_le32(8),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(16384),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE9, 10 and 11: Reserved for MHI */
/* CE12: Target CV prefetch */
{
.pipenum = __cpu_to_le32(12),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE13: Target CV prefetch */
{
.pipenum = __cpu_to_le32(13),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE14: WMI logging/CFR/Spectral/Radar */
{
.pipenum = __cpu_to_le32(14),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE15: Reserved */
};
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config ath12k_target_ce_config_wlan_wcn7850[] = {
/* CE0: host->target HTC control and raw streams */
{
.pipenum = __cpu_to_le32(0),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE1: target->host HTT + HTC control */
{
.pipenum = __cpu_to_le32(1),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE2: target->host WMI */
{
.pipenum = __cpu_to_le32(2),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE3: host->target WMI */
{
.pipenum = __cpu_to_le32(3),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE4: host->target HTT */
{
.pipenum = __cpu_to_le32(4),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(256),
.nbytes_max = __cpu_to_le32(256),
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
.reserved = __cpu_to_le32(0),
},
/* CE5: target->host Pktlog */
{
.pipenum = __cpu_to_le32(5),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE6: Reserved for target autonomous hif_memcpy */
{
.pipenum = __cpu_to_le32(6),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(16384),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE7 used only by Host */
{
.pipenum = __cpu_to_le32(7),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
.nentries = __cpu_to_le32(0),
.nbytes_max = __cpu_to_le32(0),
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
.reserved = __cpu_to_le32(0),
},
/* CE8 target->host used only by IPA */
{
.pipenum = __cpu_to_le32(8),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(16384),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE 9, 10, 11 are used by MHI driver */
};
/* Map from service/endpoint to Copy Engine.
* This table is derived from the CE_PCI TABLE, above.
* It is passed to the Target at startup for use by firmware.
*/
static const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_qcn9274[] = {
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(3),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(3),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(3),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(3),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(3),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(0),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(1),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(0),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(1),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(4),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(1),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(7),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_PKT_LOG),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(5),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_DIAG),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(14),
},
/* (Additions here) */
{ /* must be last */
__cpu_to_le32(0),
__cpu_to_le32(0),
__cpu_to_le32(0),
},
};
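/* Worked example of how this map is consumed (illustrative reading, not extra
 * driver code): the { ATH12K_HTC_SVC_ID_HTT_DATA_MSG, PIPEDIR_OUT, 4 } entry
 * above tells the firmware that host->target HTT traffic flows over CE4,
 * matching the "CE4: host->target HTT" pipe declared in the target CE
 * configuration. The all-zero terminator entry is how the consumer detects
 * the end of the map.
 */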
static const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_wcn7850[] = {
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(3),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(3),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(3),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(3),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(3),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(0),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(2),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
__cpu_to_le32(4),
},
{
__cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
__cpu_to_le32(1),
},
/* (Additions here) */
{ /* must be last */
__cpu_to_le32(0),
__cpu_to_le32(0),
__cpu_to_le32(0),
},
};
static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
.tx = {
ATH12K_TX_RING_MASK_0,
ATH12K_TX_RING_MASK_1,
ATH12K_TX_RING_MASK_2,
ATH12K_TX_RING_MASK_3,
},
.rx_mon_dest = {
0, 0, 0,
ATH12K_RX_MON_RING_MASK_0,
ATH12K_RX_MON_RING_MASK_1,
ATH12K_RX_MON_RING_MASK_2,
},
.rx = {
0, 0, 0, 0,
ATH12K_RX_RING_MASK_0,
ATH12K_RX_RING_MASK_1,
ATH12K_RX_RING_MASK_2,
ATH12K_RX_RING_MASK_3,
},
.rx_err = {
0, 0, 0,
ATH12K_RX_ERR_RING_MASK_0,
},
.rx_wbm_rel = {
0, 0, 0,
ATH12K_RX_WBM_REL_RING_MASK_0,
},
.reo_status = {
0, 0, 0,
ATH12K_REO_STATUS_RING_MASK_0,
},
.host2rxdma = {
0, 0, 0,
ATH12K_HOST2RXDMA_RING_MASK_0,
},
.tx_mon_dest = {
ATH12K_TX_MON_RING_MASK_0,
ATH12K_TX_MON_RING_MASK_1,
},
};
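/* Reading of the layout above (inferred from the array shapes, not from a
 * definition visible in this file): each index in these per-ring arrays
 * corresponds to one interrupt group, so the leading zeros stagger ring
 * servicing across groups. For qcn9274 the tx rings occupy groups 0-3 while
 * the rx rings are handled by groups 4-7.
 */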
static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = {
.tx = {
ATH12K_TX_RING_MASK_0,
ATH12K_TX_RING_MASK_2,
ATH12K_TX_RING_MASK_4,
},
.rx_mon_dest = {
},
.rx = {
0, 0, 0,
ATH12K_RX_RING_MASK_0,
ATH12K_RX_RING_MASK_1,
ATH12K_RX_RING_MASK_2,
ATH12K_RX_RING_MASK_3,
},
.rx_err = {
ATH12K_RX_ERR_RING_MASK_0,
},
.rx_wbm_rel = {
ATH12K_RX_WBM_REL_RING_MASK_0,
},
.reo_status = {
ATH12K_REO_STATUS_RING_MASK_0,
},
.host2rxdma = {
},
.tx_mon_dest = {
},
};
static const struct ath12k_hw_regs qcn9274_v1_regs = {
/* SW2TCL(x) R0 ring configuration address */
.hal_tcl1_ring_id = 0x00000908,
.hal_tcl1_ring_misc = 0x00000910,
.hal_tcl1_ring_tp_addr_lsb = 0x0000091c,
.hal_tcl1_ring_tp_addr_msb = 0x00000920,
.hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000930,
.hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000934,
.hal_tcl1_ring_msi1_base_lsb = 0x00000948,
.hal_tcl1_ring_msi1_base_msb = 0x0000094c,
.hal_tcl1_ring_msi1_data = 0x00000950,
.hal_tcl_ring_base_lsb = 0x00000b58,
/* TCL STATUS ring address */
.hal_tcl_status_ring_base_lsb = 0x00000d38,
.hal_wbm_idle_ring_base_lsb = 0x00000d0c,
.hal_wbm_idle_ring_misc_addr = 0x00000d1c,
.hal_wbm_r0_idle_list_cntl_addr = 0x00000210,
.hal_wbm_r0_idle_list_size_addr = 0x00000214,
.hal_wbm_scattered_ring_base_lsb = 0x00000220,
.hal_wbm_scattered_ring_base_msb = 0x00000224,
.hal_wbm_scattered_desc_head_info_ix0 = 0x00000230,
.hal_wbm_scattered_desc_head_info_ix1 = 0x00000234,
.hal_wbm_scattered_desc_tail_info_ix0 = 0x00000240,
.hal_wbm_scattered_desc_tail_info_ix1 = 0x00000244,
.hal_wbm_scattered_desc_ptr_hp_addr = 0x0000024c,
.hal_wbm_sw_release_ring_base_lsb = 0x0000034c,
.hal_wbm_sw1_release_ring_base_lsb = 0x000003c4,
.hal_wbm0_release_ring_base_lsb = 0x00000dd8,
.hal_wbm1_release_ring_base_lsb = 0x00000e50,
/* PCIe base address */
.pcie_qserdes_sysclk_en_sel = 0x01e0c0a8,
.pcie_pcs_osc_dtct_config_base = 0x01e0d45c,
/* PPE release ring address */
.hal_ppe_rel_ring_base = 0x0000043c,
/* REO DEST ring address */
.hal_reo2_ring_base = 0x0000055c,
.hal_reo1_misc_ctrl_addr = 0x00000b7c,
.hal_reo1_sw_cookie_cfg0 = 0x00000050,
.hal_reo1_sw_cookie_cfg1 = 0x00000054,
.hal_reo1_qdesc_lut_base0 = 0x00000058,
.hal_reo1_qdesc_lut_base1 = 0x0000005c,
.hal_reo1_ring_base_lsb = 0x000004e4,
.hal_reo1_ring_base_msb = 0x000004e8,
.hal_reo1_ring_id = 0x000004ec,
.hal_reo1_ring_misc = 0x000004f4,
.hal_reo1_ring_hp_addr_lsb = 0x000004f8,
.hal_reo1_ring_hp_addr_msb = 0x000004fc,
.hal_reo1_ring_producer_int_setup = 0x00000508,
.hal_reo1_ring_msi1_base_lsb = 0x0000052C,
.hal_reo1_ring_msi1_base_msb = 0x00000530,
.hal_reo1_ring_msi1_data = 0x00000534,
.hal_reo1_aging_thres_ix0 = 0x00000b08,
.hal_reo1_aging_thres_ix1 = 0x00000b0c,
.hal_reo1_aging_thres_ix2 = 0x00000b10,
.hal_reo1_aging_thres_ix3 = 0x00000b14,
/* REO Exception ring address */
.hal_reo2_sw0_ring_base = 0x000008a4,
/* REO Reinject ring address */
.hal_sw2reo_ring_base = 0x00000304,
.hal_sw2reo1_ring_base = 0x0000037c,
/* REO cmd ring address */
.hal_reo_cmd_ring_base = 0x0000028c,
/* REO status ring address */
.hal_reo_status_ring_base = 0x00000a84,
};
static const struct ath12k_hw_regs qcn9274_v2_regs = {
/* SW2TCL(x) R0 ring configuration address */
.hal_tcl1_ring_id = 0x00000908,
.hal_tcl1_ring_misc = 0x00000910,
.hal_tcl1_ring_tp_addr_lsb = 0x0000091c,
.hal_tcl1_ring_tp_addr_msb = 0x00000920,
.hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000930,
.hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000934,
.hal_tcl1_ring_msi1_base_lsb = 0x00000948,
.hal_tcl1_ring_msi1_base_msb = 0x0000094c,
.hal_tcl1_ring_msi1_data = 0x00000950,
.hal_tcl_ring_base_lsb = 0x00000b58,
/* TCL STATUS ring address */
.hal_tcl_status_ring_base_lsb = 0x00000d38,
/* WBM idle link ring address */
.hal_wbm_idle_ring_base_lsb = 0x00000d3c,
.hal_wbm_idle_ring_misc_addr = 0x00000d4c,
.hal_wbm_r0_idle_list_cntl_addr = 0x00000240,
.hal_wbm_r0_idle_list_size_addr = 0x00000244,
.hal_wbm_scattered_ring_base_lsb = 0x00000250,
.hal_wbm_scattered_ring_base_msb = 0x00000254,
.hal_wbm_scattered_desc_head_info_ix0 = 0x00000260,
.hal_wbm_scattered_desc_head_info_ix1 = 0x00000264,
.hal_wbm_scattered_desc_tail_info_ix0 = 0x00000270,
.hal_wbm_scattered_desc_tail_info_ix1 = 0x00000274,
.hal_wbm_scattered_desc_ptr_hp_addr = 0x0000027c,
/* SW2WBM release ring address */
.hal_wbm_sw_release_ring_base_lsb = 0x0000037c,
.hal_wbm_sw1_release_ring_base_lsb = 0x000003f4,
/* WBM2SW release ring address */
.hal_wbm0_release_ring_base_lsb = 0x00000e08,
.hal_wbm1_release_ring_base_lsb = 0x00000e80,
/* PCIe base address */
.pcie_qserdes_sysclk_en_sel = 0x01e0c0a8,
.pcie_pcs_osc_dtct_config_base = 0x01e0d45c,
/* PPE release ring address */
.hal_ppe_rel_ring_base = 0x0000046c,
/* REO DEST ring address */
.hal_reo2_ring_base = 0x00000578,
.hal_reo1_misc_ctrl_addr = 0x00000b9c,
.hal_reo1_sw_cookie_cfg0 = 0x0000006c,
.hal_reo1_sw_cookie_cfg1 = 0x00000070,
.hal_reo1_qdesc_lut_base0 = 0x00000074,
.hal_reo1_qdesc_lut_base1 = 0x00000078,
.hal_reo1_ring_base_lsb = 0x00000500,
.hal_reo1_ring_base_msb = 0x00000504,
.hal_reo1_ring_id = 0x00000508,
.hal_reo1_ring_misc = 0x00000510,
.hal_reo1_ring_hp_addr_lsb = 0x00000514,
.hal_reo1_ring_hp_addr_msb = 0x00000518,
.hal_reo1_ring_producer_int_setup = 0x00000524,
.hal_reo1_ring_msi1_base_lsb = 0x00000548,
.hal_reo1_ring_msi1_base_msb = 0x0000054C,
.hal_reo1_ring_msi1_data = 0x00000550,
.hal_reo1_aging_thres_ix0 = 0x00000B28,
.hal_reo1_aging_thres_ix1 = 0x00000B2C,
.hal_reo1_aging_thres_ix2 = 0x00000B30,
.hal_reo1_aging_thres_ix3 = 0x00000B34,
/* REO Exception ring address */
.hal_reo2_sw0_ring_base = 0x000008c0,
/* REO Reinject ring address */
.hal_sw2reo_ring_base = 0x00000320,
.hal_sw2reo1_ring_base = 0x00000398,
/* REO cmd ring address */
.hal_reo_cmd_ring_base = 0x000002A8,
/* REO status ring address */
.hal_reo_status_ring_base = 0x00000aa0,
};
static const struct ath12k_hw_regs wcn7850_regs = {
/* SW2TCL(x) R0 ring configuration address */
.hal_tcl1_ring_id = 0x00000908,
.hal_tcl1_ring_misc = 0x00000910,
.hal_tcl1_ring_tp_addr_lsb = 0x0000091c,
.hal_tcl1_ring_tp_addr_msb = 0x00000920,
.hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000930,
.hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000934,
.hal_tcl1_ring_msi1_base_lsb = 0x00000948,
.hal_tcl1_ring_msi1_base_msb = 0x0000094c,
.hal_tcl1_ring_msi1_data = 0x00000950,
.hal_tcl_ring_base_lsb = 0x00000b58,
/* TCL STATUS ring address */
.hal_tcl_status_ring_base_lsb = 0x00000d38,
.hal_wbm_idle_ring_base_lsb = 0x00000d3c,
.hal_wbm_idle_ring_misc_addr = 0x00000d4c,
.hal_wbm_r0_idle_list_cntl_addr = 0x00000240,
.hal_wbm_r0_idle_list_size_addr = 0x00000244,
.hal_wbm_scattered_ring_base_lsb = 0x00000250,
.hal_wbm_scattered_ring_base_msb = 0x00000254,
.hal_wbm_scattered_desc_head_info_ix0 = 0x00000260,
.hal_wbm_scattered_desc_head_info_ix1 = 0x00000264,
.hal_wbm_scattered_desc_tail_info_ix0 = 0x00000270,
.hal_wbm_scattered_desc_tail_info_ix1 = 0x00000274,
.hal_wbm_scattered_desc_ptr_hp_addr = 0x0000027c,
.hal_wbm_sw_release_ring_base_lsb = 0x0000037c,
.hal_wbm_sw1_release_ring_base_lsb = 0x00000284,
.hal_wbm0_release_ring_base_lsb = 0x00000e08,
.hal_wbm1_release_ring_base_lsb = 0x00000e80,
/* PCIe base address */
.pcie_qserdes_sysclk_en_sel = 0x01e0e0a8,
.pcie_pcs_osc_dtct_config_base = 0x01e0f45c,
/* PPE release ring address */
.hal_ppe_rel_ring_base = 0x0000043c,
/* REO DEST ring address */
.hal_reo2_ring_base = 0x0000055c,
.hal_reo1_misc_ctrl_addr = 0x00000b7c,
.hal_reo1_sw_cookie_cfg0 = 0x00000050,
.hal_reo1_sw_cookie_cfg1 = 0x00000054,
.hal_reo1_qdesc_lut_base0 = 0x00000058,
.hal_reo1_qdesc_lut_base1 = 0x0000005c,
.hal_reo1_ring_base_lsb = 0x000004e4,
.hal_reo1_ring_base_msb = 0x000004e8,
.hal_reo1_ring_id = 0x000004ec,
.hal_reo1_ring_misc = 0x000004f4,
.hal_reo1_ring_hp_addr_lsb = 0x000004f8,
.hal_reo1_ring_hp_addr_msb = 0x000004fc,
.hal_reo1_ring_producer_int_setup = 0x00000508,
.hal_reo1_ring_msi1_base_lsb = 0x0000052C,
.hal_reo1_ring_msi1_base_msb = 0x00000530,
.hal_reo1_ring_msi1_data = 0x00000534,
.hal_reo1_aging_thres_ix0 = 0x00000b08,
.hal_reo1_aging_thres_ix1 = 0x00000b0c,
.hal_reo1_aging_thres_ix2 = 0x00000b10,
.hal_reo1_aging_thres_ix3 = 0x00000b14,
/* REO Exception ring address */
.hal_reo2_sw0_ring_base = 0x000008a4,
/* REO Reinject ring address */
.hal_sw2reo_ring_base = 0x00000304,
.hal_sw2reo1_ring_base = 0x0000037c,
/* REO cmd ring address */
.hal_reo_cmd_ring_base = 0x0000028c,
/* REO status ring address */
.hal_reo_status_ring_base = 0x00000a84,
};
static const struct ath12k_hw_hal_params ath12k_hw_hal_params_qcn9274 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
.wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN |
HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW1_EN |
HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN |
HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN |
HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
};
static const struct ath12k_hw_hal_params ath12k_hw_hal_params_wcn7850 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
.wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN |
HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN |
HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN |
HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
};
static const struct ath12k_hw_params ath12k_hw_params[] = {
{
.name = "qcn9274 hw1.0",
.hw_rev = ATH12K_HW_QCN9274_HW10,
.fw = {
.dir = "QCN9274/hw1.0",
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
},
.max_radios = 1,
.single_pdev_only = false,
.qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274,
.internal_sleep_clock = false,
.hw_ops = &qcn9274_ops,
.ring_mask = &ath12k_hw_ring_mask_qcn9274,
.regs = &qcn9274_v1_regs,
.host_ce_config = ath12k_host_ce_config_qcn9274,
.ce_count = 16,
.target_ce_config = ath12k_target_ce_config_wlan_qcn9274,
.target_ce_count = 12,
.svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_qcn9274,
.svc_to_ce_map_len = 18,
.hal_params = &ath12k_hw_hal_params_qcn9274,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 1,
.num_rxdma_dst_ring = 0,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
.supports_monitor = false,
.idle_ps = false,
.download_calib = true,
.supports_suspend = false,
.tcl_ring_retry = true,
.reoq_lut_support = false,
.supports_shadow_regs = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274),
.num_tcl_banks = 48,
.max_tx_ring = 4,
.mhi_config = &ath12k_mhi_config_qcn9274,
.wmi_init = ath12k_wmi_init_qcn9274,
.hal_ops = &hal_qcn9274_ops,
.qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01),
},
{
.name = "wcn7850 hw2.0",
.hw_rev = ATH12K_HW_WCN7850_HW20,
.fw = {
.dir = "WCN7850/hw2.0",
.board_size = 256 * 1024,
.cal_offset = 256 * 1024,
},
.max_radios = 1,
.single_pdev_only = true,
.qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_WCN7850,
.internal_sleep_clock = true,
.hw_ops = &wcn7850_ops,
.ring_mask = &ath12k_hw_ring_mask_wcn7850,
.regs = &wcn7850_regs,
.host_ce_config = ath12k_host_ce_config_wcn7850,
.ce_count = 9,
.target_ce_config = ath12k_target_ce_config_wlan_wcn7850,
.target_ce_count = 9,
.svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_wcn7850,
.svc_to_ce_map_len = 14,
.hal_params = &ath12k_hw_hal_params_wcn7850,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 2,
.num_rxdma_dst_ring = 1,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.interface_modes = BIT(NL80211_IFTYPE_STATION),
.supports_monitor = false,
.idle_ps = true,
.download_calib = false,
.supports_suspend = false,
.tcl_ring_retry = false,
.reoq_lut_support = false,
.supports_shadow_regs = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn7850),
.num_tcl_banks = 7,
.max_tx_ring = 3,
.mhi_config = &ath12k_mhi_config_wcn7850,
.wmi_init = ath12k_wmi_init_wcn7850,
.hal_ops = &hal_wcn7850_ops,
.qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01) |
BIT(CNSS_PCIE_PERST_NO_PULL_V01),
},
{
.name = "qcn9274 hw2.0",
.hw_rev = ATH12K_HW_QCN9274_HW20,
.fw = {
.dir = "QCN9274/hw2.0",
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
},
.max_radios = 1,
.single_pdev_only = false,
.qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274,
.internal_sleep_clock = false,
.hw_ops = &qcn9274_ops,
.ring_mask = &ath12k_hw_ring_mask_qcn9274,
.regs = &qcn9274_v2_regs,
.host_ce_config = ath12k_host_ce_config_qcn9274,
.ce_count = 16,
.target_ce_config = ath12k_target_ce_config_wlan_qcn9274,
.target_ce_count = 12,
.svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_qcn9274,
.svc_to_ce_map_len = 18,
.hal_params = &ath12k_hw_hal_params_qcn9274,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 1,
.num_rxdma_dst_ring = 0,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
.supports_monitor = false,
.idle_ps = false,
.download_calib = true,
.supports_suspend = false,
.tcl_ring_retry = true,
.reoq_lut_support = false,
.supports_shadow_regs = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274),
.num_tcl_banks = 48,
.max_tx_ring = 4,
.mhi_config = &ath12k_mhi_config_qcn9274,
.wmi_init = ath12k_wmi_init_qcn9274,
.hal_ops = &hal_qcn9274_ops,
.qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01),
},
};
int ath12k_hw_init(struct ath12k_base *ab)
{
const struct ath12k_hw_params *hw_params = NULL;
int i;
for (i = 0; i < ARRAY_SIZE(ath12k_hw_params); i++) {
hw_params = &ath12k_hw_params[i];
if (hw_params->hw_rev == ab->hw_rev)
break;
}
if (i == ARRAY_SIZE(ath12k_hw_params)) {
ath12k_err(ab, "Unsupported hardware version: 0x%x\n", ab->hw_rev);
return -EINVAL;
}
ab->hw_params = hw_params;
ath12k_info(ab, "Hardware name: %s\n", ab->hw_params->name);
return 0;
}
|
linux-master
|
drivers/net/wireless/ath/ath12k/hw.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
#include <linux/of.h>
#include "core.h"
#include "debug.h"
#include "mac.h"
#include "hw.h"
#include "peer.h"
struct ath12k_wmi_svc_ready_parse {
bool wmi_svc_bitmap_done;
};
struct ath12k_wmi_dma_ring_caps_parse {
struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
u32 n_dma_ring_caps;
};
struct ath12k_wmi_service_ext_arg {
u32 default_conc_scan_config_bits;
u32 default_fw_config_bits;
struct ath12k_wmi_ppe_threshold_arg ppet;
u32 he_cap_info;
u32 mpdu_density;
u32 max_bssid_rx_filters;
u32 num_hw_modes;
u32 num_phy;
};
struct ath12k_wmi_svc_rdy_ext_parse {
struct ath12k_wmi_service_ext_arg arg;
const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
u32 n_hw_mode_caps;
u32 tot_phy_id;
struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
u32 n_mac_phy_caps;
const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
u32 n_ext_hal_reg_caps;
struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
bool hw_mode_done;
bool mac_phy_done;
bool ext_hal_reg_done;
bool mac_phy_chainmask_combo_done;
bool mac_phy_chainmask_cap_done;
bool oem_dma_ring_cap_done;
bool dma_ring_cap_done;
};
struct ath12k_wmi_svc_rdy_ext2_arg {
u32 reg_db_version;
u32 hw_min_max_tx_power_2ghz;
u32 hw_min_max_tx_power_5ghz;
u32 chwidth_num_peer_caps;
u32 preamble_puncture_bw;
u32 max_user_per_ppdu_ofdma;
u32 max_user_per_ppdu_mumimo;
u32 target_cap_flags;
u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE];
u32 max_num_linkview_peers;
u32 max_num_msduq_supported_per_tid;
u32 default_num_msduq_supported_per_tid;
};
struct ath12k_wmi_svc_rdy_ext2_parse {
struct ath12k_wmi_svc_rdy_ext2_arg arg;
struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
bool dma_ring_cap_done;
bool spectral_bin_scaling_done;
bool mac_phy_caps_ext_done;
};
struct ath12k_wmi_rdy_parse {
u32 num_extra_mac_addr;
};
struct ath12k_wmi_dma_buf_release_arg {
struct ath12k_wmi_dma_buf_release_fixed_params fixed;
const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
u32 num_buf_entry;
u32 num_meta;
bool buf_entry_done;
bool meta_data_done;
};
struct ath12k_wmi_tlv_policy {
size_t min_len;
};
struct wmi_tlv_mgmt_rx_parse {
const struct ath12k_wmi_mgmt_rx_params *fixed;
const u8 *frame_buf;
bool frame_buf_done;
};
static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
[WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
[WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
[WMI_TAG_SERVICE_READY_EVENT] = {
.min_len = sizeof(struct wmi_service_ready_event) },
[WMI_TAG_SERVICE_READY_EXT_EVENT] = {
.min_len = sizeof(struct wmi_service_ready_ext_event) },
[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
.min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
[WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
.min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
[WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
.min_len = sizeof(struct wmi_vdev_start_resp_event) },
[WMI_TAG_PEER_DELETE_RESP_EVENT] = {
.min_len = sizeof(struct wmi_peer_delete_resp_event) },
[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
.min_len = sizeof(struct wmi_bcn_tx_status_event) },
[WMI_TAG_VDEV_STOPPED_EVENT] = {
.min_len = sizeof(struct wmi_vdev_stopped_event) },
[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
.min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
[WMI_TAG_MGMT_RX_HDR] = {
.min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
[WMI_TAG_MGMT_TX_COMPL_EVENT] = {
.min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
[WMI_TAG_SCAN_EVENT] = {
.min_len = sizeof(struct wmi_scan_event) },
[WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
.min_len = sizeof(struct wmi_peer_sta_kickout_event) },
[WMI_TAG_ROAM_EVENT] = {
.min_len = sizeof(struct wmi_roam_event) },
[WMI_TAG_CHAN_INFO_EVENT] = {
.min_len = sizeof(struct wmi_chan_info_event) },
[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
.min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
.min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
[WMI_TAG_READY_EVENT] = {
.min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
[WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
.min_len = sizeof(struct wmi_service_available_event) },
[WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
.min_len = sizeof(struct wmi_peer_assoc_conf_event) },
[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
.min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
[WMI_TAG_HOST_SWFDA_EVENT] = {
.min_len = sizeof(struct wmi_fils_discovery_event) },
[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
};
static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
{
return le32_encode_bits(cmd, WMI_TLV_TAG) |
le32_encode_bits(len, WMI_TLV_LEN);
}
static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
{
return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
}
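/* Sketch of the resulting header layout, assuming the usual wmi.h field
 * definitions WMI_TLV_TAG = GENMASK(31, 16) and WMI_TLV_LEN = GENMASK(15, 0):
 * ath12k_wmi_tlv_hdr(0x6, 8) encodes to the le32 value 0x00060008. The
 * _cmd_hdr variant takes the full command size and subtracts TLV_HDR_SIZE,
 * since the length field describes only the TLV payload, not the 4-byte
 * header itself.
 */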
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config)
{
config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
if (ab->num_radios == 2) {
config->num_peers = TARGET_NUM_PEERS(DBS);
config->num_tids = TARGET_NUM_TIDS(DBS);
} else if (ab->num_radios == 3) {
config->num_peers = TARGET_NUM_PEERS(DBS_SBS);
config->num_tids = TARGET_NUM_TIDS(DBS_SBS);
} else {
/* Control should not reach here */
config->num_peers = TARGET_NUM_PEERS(SINGLE);
config->num_tids = TARGET_NUM_TIDS(SINGLE);
}
config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
config->num_peer_keys = TARGET_NUM_PEER_KEYS;
config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
else
config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
config->dma_burst_size = TARGET_DMA_BURST_SIZE;
config->rx_skip_defrag_timeout_dup_detection_check =
TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
config->vow_config = TARGET_VOW_CONFIG;
config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
config->rx_batchmode = TARGET_RX_BATCHMODE;
/* Indicates that the host supports peer map v3 and unmap v2 */
config->peer_map_unmap_version = 0x32;
config->twt_ap_pdev_count = ab->num_radios;
config->twt_ap_sta_count = 1000;
}
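/* The 0x32 written to peer_map_unmap_version above presumably packs the two
 * versions into nibbles: 0x3 for peer map v3 in the high nibble and 0x2 for
 * unmap v2 in the low nibble, matching the comment at the assignment. This
 * encoding is an inference from the value, not confirmed in this file.
 */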
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config)
{
config->num_vdevs = 4;
config->num_peers = 16;
config->num_tids = 32;
config->num_offload_peers = 3;
config->num_offload_reorder_buffs = 3;
config->num_peer_keys = TARGET_NUM_PEER_KEYS;
config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
config->num_mcast_groups = 0;
config->num_mcast_table_elems = 0;
config->mcast2ucast_mode = 0;
config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
config->num_wds_entries = 0;
config->dma_burst_size = 0;
config->rx_skip_defrag_timeout_dup_detection_check = 0;
config->vow_config = TARGET_VOW_CONFIG;
config->gtk_offload_max_vdev = 2;
config->num_msdu_desc = 0x400;
config->beacon_tx_offload_max_vdev = 2;
config->rx_batchmode = TARGET_RX_BATCHMODE;
config->peer_map_unmap_version = 0x1;
config->use_pdev_id = 1;
config->max_frag_entries = 0xa;
config->num_tdls_vdevs = 0x1;
config->num_tdls_conn_table_entries = 8;
config->num_multicast_filter_entries = 0x20;
config->num_wow_filters = 0x16;
config->num_keep_alive_pattern = 0;
}
#define PRIMAP(_hw_mode_) \
[_hw_mode_] = _hw_mode_##_PRI
static const int ath12k_hw_mode_pri_map[] = {
PRIMAP(WMI_HOST_HW_MODE_SINGLE),
PRIMAP(WMI_HOST_HW_MODE_DBS),
PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
PRIMAP(WMI_HOST_HW_MODE_SBS),
PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
/* keep last */
PRIMAP(WMI_HOST_HW_MODE_MAX),
};
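/* Example expansion of the PRIMAP() designated-initializer macro:
 * PRIMAP(WMI_HOST_HW_MODE_DBS) becomes
 * [WMI_HOST_HW_MODE_DBS] = WMI_HOST_HW_MODE_DBS_PRI,
 * i.e. each hw mode indexes its own *_PRI priority constant, so the table can
 * be consulted as ath12k_hw_mode_pri_map[mode] without ordering assumptions.
 */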
static int
ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
const void *ptr, void *data),
void *data)
{
const void *begin = ptr;
const struct wmi_tlv *tlv;
u16 tlv_tag, tlv_len;
int ret;
while (len > 0) {
if (len < sizeof(*tlv)) {
ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
ptr - begin, len, sizeof(*tlv));
return -EINVAL;
}
tlv = ptr;
tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
ptr += sizeof(*tlv);
len -= sizeof(*tlv);
if (tlv_len > len) {
ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
tlv_tag, ptr - begin, len, tlv_len);
return -EINVAL;
}
if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
ath12k_wmi_tlv_policies[tlv_tag].min_len &&
ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
tlv_tag, ptr - begin, tlv_len,
ath12k_wmi_tlv_policies[tlv_tag].min_len);
return -EINVAL;
}
ret = iter(ab, tlv_tag, tlv_len, ptr, data);
if (ret)
return ret;
ptr += tlv_len;
len -= tlv_len;
}
return 0;
}
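/* Worked pass through the iterator above: for a buffer that starts with the
 * le32 header 0x002a0010, le32_get_bits() yields tag 0x2a and len 0x10;
 * iter() is invoked with ptr advanced past the 4-byte header, and the walk
 * then skips the 0x10 payload bytes to reach the next TLV. This assumes the
 * same GENMASK(31, 16) / GENMASK(15, 0) tag/len split used when building
 * headers earlier in this file.
 */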
static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
const void *ptr, void *data)
{
const void **tb = data;
if (tag < WMI_TAG_MAX)
tb[tag] = ptr;
return 0;
}
static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
const void *ptr, size_t len)
{
return ath12k_wmi_tlv_iter(ar, ptr, len, ath12k_wmi_tlv_iter_parse,
(void *)tb);
}
static const void **
ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, const void *ptr,
size_t len, gfp_t gfp)
{
const void **tb;
int ret;
tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
if (!tb)
return ERR_PTR(-ENOMEM);
ret = ath12k_wmi_tlv_parse(ab, tb, ptr, len);
if (ret) {
kfree(tb);
return ERR_PTR(ret);
}
return tb;
}
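/* Typical caller pattern for the helper above (a sketch based on its return
 * contract, not copied from a specific call site):
 *
 * const void **tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len,
 *                                              GFP_ATOMIC);
 * if (IS_ERR(tb))
 *         return PTR_ERR(tb);
 * ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];   // NULL if the tag was absent
 * ...
 * kfree(tb);                             // caller owns the table
 */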
static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
u32 cmd_id)
{
struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
struct ath12k_base *ab = wmi->wmi_ab->ab;
struct wmi_cmd_hdr *cmd_hdr;
int ret;
if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
return -ENOMEM;
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);
memset(skb_cb, 0, sizeof(*skb_cb));
ret = ath12k_htc_send(&ab->htc, wmi->eid, skb);
if (ret)
goto err_pull;
return 0;
err_pull:
skb_pull(skb, sizeof(struct wmi_cmd_hdr));
return ret;
}
int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
u32 cmd_id)
{
struct ath12k_wmi_base *wmi_sc = wmi->wmi_ab;
int ret = -EOPNOTSUPP;
might_sleep();
wait_event_timeout(wmi_sc->tx_credits_wq, ({
ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_sc->ab->dev_flags))
ret = -ESHUTDOWN;
(ret != -EAGAIN);
}), WMI_SEND_TIMEOUT_HZ);
if (ret == -EAGAIN)
ath12k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id);
return ret;
}
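/* Note on the construct above: the ({ ... }) condition is a GCC statement
 * expression, so the send is retried on every wait-queue wakeup until it
 * stops returning -EAGAIN (i.e. HTC tx credits became available), the crash
 * flush flag turns the result into -ESHUTDOWN, or WMI_SEND_TIMEOUT_HZ expires
 * with -EAGAIN still pending, which triggers the timeout warning.
 */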
static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
const void *ptr,
struct ath12k_wmi_service_ext_arg *arg)
{
const struct wmi_service_ready_ext_event *ev = ptr;
int i;
if (!ev)
return -EINVAL;
/* Move this to host based bitmap */
arg->default_conc_scan_config_bits =
le32_to_cpu(ev->default_conc_scan_config_bits);
arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits);
arg->he_cap_info = le32_to_cpu(ev->he_cap_info);
arg->mpdu_density = le32_to_cpu(ev->mpdu_density);
arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters);
arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1);
arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info);
for (i = 0; i < WMI_MAX_NUM_SS; i++)
arg->ppet.ppet16_ppet8_ru3_ru0[i] =
le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]);
return 0;
}
static int
ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
struct ath12k_wmi_svc_rdy_ext_parse *svc,
u8 hw_mode_id, u8 phy_id,
struct ath12k_pdev *pdev)
{
const struct ath12k_wmi_mac_phy_caps_params *mac_caps;
const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps;
const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps;
const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps;
struct ath12k_base *ab = wmi_handle->wmi_ab->ab;
struct ath12k_band_cap *cap_band;
struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
struct ath12k_fw_pdev *fw_pdev;
u32 phy_map;
u32 hw_idx, phy_idx = 0;
int i;
if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps)
return -EINVAL;
for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) {
if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id))
break;
phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map);
phy_idx = fls(phy_map);
}
if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes))
return -EINVAL;
phy_idx += phy_id;
if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy))
return -EINVAL;
mac_caps = wmi_mac_phy_caps + phy_idx;
pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);
fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
fw_pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
ab->fw_pdev_count++;
/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
* band to band for a single radio, need to see how this should be
* handled.
*/
if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
} else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
} else {
return -EINVAL;
}
/* tx/rx chainmask reported from fw depends on the actual hw chains used,
* For example, for 4x4 capable macphys, first 4 chains can be used for first
* mac and the remaining 4 chains can be used for the second mac or vice-versa.
* In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
* will be advertised for second mac or vice-versa. Compute the shift value
* for tx/rx chainmask which will be used to advertise supported ht/vht rates to
* mac80211.
*/
pdev_cap->tx_chain_mask_shift =
find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
pdev_cap->rx_chain_mask_shift =
find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
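/* Worked example of the shift computation: a second mac advertising
 * tx_chain_mask 0xf0 gets tx_chain_mask_shift = find_first_bit(...) = 4, so
 * the chainmask can be normalised back to a 0-based chain index when
 * advertising supported HT/VHT rates to mac80211.
 */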
if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g);
cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g);
cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext);
cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g);
for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
cap_band->he_cap_phy_info[i] =
le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]);
cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1);
cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info);
for (i = 0; i < WMI_MAX_NUM_SS; i++)
cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
}
if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
cap_band->max_bw_supported =
le32_to_cpu(mac_caps->max_bw_supported_5g);
cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
cap_band->he_cap_phy_info[i] =
le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);
cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);
for (i = 0; i < WMI_MAX_NUM_SS; i++)
cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
cap_band->max_bw_supported =
le32_to_cpu(mac_caps->max_bw_supported_5g);
cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
cap_band->he_cap_phy_info[i] =
le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);
cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);
for (i = 0; i < WMI_MAX_NUM_SS; i++)
cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
}
return 0;
}
static int
ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps,
u8 phy_idx,
struct ath12k_wmi_hal_reg_capabilities_ext_arg *param)
{
const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap;
if (!reg_caps || !ext_caps)
return -EINVAL;
if (phy_idx >= le32_to_cpu(reg_caps->num_phy))
return -EINVAL;
ext_reg_cap = &ext_caps[phy_idx];
param->phy_id = le32_to_cpu(ext_reg_cap->phy_id);
param->eeprom_reg_domain = le32_to_cpu(ext_reg_cap->eeprom_reg_domain);
param->eeprom_reg_domain_ext =
le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext);
param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1);
param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2);
/* check if param->wireless_mode is needed */
param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan);
param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan);
param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan);
param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan);
return 0;
}
static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab,
const void *evt_buf,
struct ath12k_wmi_target_cap_arg *cap)
{
const struct wmi_service_ready_event *ev = evt_buf;
if (!ev) {
ath12k_err(ab, "%s: failed by NULL param\n",
__func__);
return -EINVAL;
}
cap->phy_capability = le32_to_cpu(ev->phy_capability);
cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry);
cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains);
cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info);
cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info);
cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs);
cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power);
cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power);
cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info);
cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable);
cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size);
cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels);
cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs);
cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps);
cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask);
cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index);
cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc);
return 0;
}
/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
* wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
* 4-byte word.
*/
static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi,
const u32 *wmi_svc_bm)
{
int i, j;
for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
do {
if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
set_bit(j, wmi->wmi_ab->svc_map);
} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
}
}
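/* Worked example of the 4-bits-per-word layout handled above (assuming
 * WMI_SERVICE_BITS_IN_SIZE32 is 4, per the b0-b3 comment): word 0 carries
 * services 0-3 in its low nibble, word 1 carries services 4-7, and so on;
 * e.g. BIT(1) set in wmi_svc_bm[2] marks service 9 in svc_map.
 */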
static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
const void *ptr, void *data)
{
struct ath12k_wmi_svc_ready_parse *svc_ready = data;
struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
u16 expect_len;
switch (tag) {
case WMI_TAG_SERVICE_READY_EVENT:
if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
return -EINVAL;
break;
case WMI_TAG_ARRAY_UINT32:
if (!svc_ready->wmi_svc_bitmap_done) {
expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
if (len < expect_len) {
ath12k_warn(ab, "invalid len %d for the tag 0x%x\n",
len, tag);
return -EINVAL;
}
ath12k_wmi_service_bitmap_copy(wmi_handle, ptr);
svc_ready->wmi_svc_bitmap_done = true;
}
break;
default:
break;
}
return 0;
}
static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct ath12k_wmi_svc_ready_parse svc_ready = { };
int ret;
ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
ath12k_wmi_svc_rdy_parse,
&svc_ready);
if (ret) {
ath12k_warn(ab, "failed to parse tlv %d\n", ret);
return ret;
}
return 0;
}
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len)
{
struct sk_buff *skb;
struct ath12k_base *ab = wmi_sc->ab;
u32 round_len = roundup(len, 4);
skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
if (!skb)
return NULL;
skb_reserve(skb, WMI_SKB_HEADROOM);
if (!IS_ALIGNED((unsigned long)skb->data, 4))
ath12k_warn(ab, "unaligned WMI skb data\n");
skb_put(skb, round_len);
memset(skb->data, 0, round_len);
return skb;
}
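/* The roundup(len, 4) above keeps WMI TLV payloads 4-byte aligned; e.g. a
 * requested len of 10 allocates and zeroes 12 bytes. WMI_SKB_HEADROOM is
 * reserved so the HTC/WMI command headers can later be pushed in place
 * without reallocating the skb.
 */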
int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
struct sk_buff *frame)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_mgmt_send_cmd *cmd;
struct wmi_tlv *frame_tlv;
struct sk_buff *skb;
u32 buf_len;
int ret, len;
buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);
len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_mgmt_send_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->desc_id = cpu_to_le32(buf_id);
cmd->chanfreq = 0;
cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
cmd->frame_len = cpu_to_le32(frame->len);
cmd->buf_len = cpu_to_le32(buf_len);
cmd->tx_params_valid = 0;
frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len);
memcpy(frame_tlv->value, frame->data, buf_len);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
struct ath12k_wmi_vdev_create_arg *args)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_vdev_create_cmd *cmd;
struct sk_buff *skb;
struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
struct wmi_tlv *tlv;
int ret, len;
void *ptr;
/* It can be optimized by sending tx/rx chain configuration
* only for supported bands instead of always sending it for
* both the bands.
*/
len = sizeof(*cmd) + TLV_HDR_SIZE +
(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_vdev_create_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(args->if_id);
cmd->vdev_type = cpu_to_le32(args->type);
cmd->vdev_subtype = cpu_to_le32(args->subtype);
cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
cmd->pdev_id = cpu_to_le32(args->pdev_id);
cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
ptr = skb->data + sizeof(*cmd);
len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
ptr += TLV_HDR_SIZE;
txrx_streams = ptr;
len = sizeof(*txrx_streams);
txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
len);
txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
txrx_streams->supported_tx_streams =
args->chains[NL80211_BAND_2GHZ].tx;
txrx_streams->supported_rx_streams =
args->chains[NL80211_BAND_2GHZ].rx;
txrx_streams++;
txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
len);
txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
txrx_streams->supported_tx_streams =
args->chains[NL80211_BAND_5GHZ].tx;
txrx_streams->supported_rx_streams =
args->chains[NL80211_BAND_5GHZ].rx;
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
args->if_id, args->type, args->subtype,
macaddr, args->pdev_id);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to submit WMI_VDEV_CREATE_CMDID\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_vdev_delete_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_vdev_delete_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
if (ret) {
ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_vdev_stop_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_vdev_stop_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
if (ret) {
ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_vdev_down_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_vdev_down_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
if (ret) {
ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
struct wmi_vdev_start_req_arg *arg)
{
memset(chan, 0, sizeof(*chan));
chan->mhz = cpu_to_le32(arg->freq);
chan->band_center_freq1 = cpu_to_le32(arg->band_center_freq1);
if (arg->mode == MODE_11AC_VHT80_80)
chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq2);
else
chan->band_center_freq2 = 0;
chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
if (arg->passive)
chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
if (arg->allow_ibss)
chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
if (arg->allow_ht)
chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
if (arg->allow_vht)
chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
if (arg->allow_he)
chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
if (arg->ht40plus)
chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
if (arg->chan_radar)
chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
if (arg->freq2_radar)
chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);
chan->reg_info_1 = le32_encode_bits(arg->max_power,
WMI_CHAN_REG_INFO1_MAX_PWR) |
le32_encode_bits(arg->max_reg_power,
WMI_CHAN_REG_INFO1_MAX_REG_PWR);
chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
WMI_CHAN_REG_INFO2_ANT_MAX) |
le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
}
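/* Encoding summary for the packed fields above: info carries the phy mode in
 * WMI_CHAN_INFO_MODE plus one flag bit per capability (passive, ad-hoc, HT,
 * VHT, HE, HT40+, DFS, DFS on freq2), reg_info_1 packs max tx power together
 * with max regulatory power, and reg_info_2 packs max antenna gain together
 * with max tx power again, all as le32 bitfields.
 */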
int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
bool restart)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_vdev_start_request_cmd *cmd;
struct sk_buff *skb;
struct ath12k_wmi_channel_params *chan;
struct wmi_tlv *tlv;
void *ptr;
int ret, len;
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
return -EINVAL;
len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(arg->vdev_id);
cmd->beacon_interval = cpu_to_le32(arg->bcn_intval);
cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate);
cmd->dtim_period = cpu_to_le32(arg->dtim_period);
cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors);
cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams);
cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams);
cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms);
cmd->regdomain = cpu_to_le32(arg->regdomain);
cmd->he_ops = cpu_to_le32(arg->he_ops);
cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
if (!restart) {
if (arg->ssid) {
cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len);
memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
}
if (arg->hidden_ssid)
cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID);
if (arg->pmf_enabled)
cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED);
}
cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED);
ptr = skb->data + sizeof(*cmd);
chan = ptr;
ath12k_wmi_put_wmi_channel(chan, arg);
chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
sizeof(*chan));
ptr += sizeof(*chan);
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
/* Note: This is a nested TLV containing:
* [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
*/
ptr += sizeof(*tlv);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
restart ? "restart" : "start", arg->vdev_id,
arg->freq, arg->mode);
if (restart)
ret = ath12k_wmi_cmd_send(wmi, skb,
WMI_VDEV_RESTART_REQUEST_CMDID);
else
ret = ath12k_wmi_cmd_send(wmi, skb,
WMI_VDEV_START_REQUEST_CMDID);
if (ret) {
ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
restart ? "restart" : "start");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_vdev_up_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_vdev_up_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->vdev_assoc_id = cpu_to_le32(aid);
ether_addr_copy(cmd->vdev_bssid.addr, bssid);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
vdev_id, aid, bssid);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
if (ret) {
ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
struct ath12k_wmi_peer_create_arg *arg)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_peer_create_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_peer_create_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD,
sizeof(*cmd));
ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr);
cmd->peer_type = cpu_to_le32(arg->peer_type);
cmd->vdev_id = cpu_to_le32(arg->vdev_id);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI peer create vdev_id %d peer_addr %pM\n",
arg->vdev_id, arg->peer_addr);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
if (ret) {
ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
const u8 *peer_addr, u8 vdev_id)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_peer_delete_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_peer_delete_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD,
sizeof(*cmd));
ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
cmd->vdev_id = cpu_to_le32(vdev_id);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI peer delete vdev_id %d peer_addr %pM\n",
vdev_id, peer_addr);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
if (ret) {
ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
struct ath12k_wmi_pdev_set_regdomain_arg *arg)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_pdev_set_regdomain_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
sizeof(*cmd));
cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use);
cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g);
cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g);
cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g);
cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g);
cmd->dfs_domain = cpu_to_le32(arg->dfs_domain);
cmd->pdev_id = cpu_to_le32(arg->pdev_id);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
arg->current_rd_in_use, arg->current_rd_2g,
arg->current_rd_5g, arg->dfs_domain, arg->pdev_id);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
u32 vdev_id, u32 param_id, u32 param_val)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_peer_set_param_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_peer_set_param_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD,
sizeof(*cmd));
ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->param_id = cpu_to_le32(param_id);
cmd->param_value = cpu_to_le32(param_val);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI vdev %d peer 0x%pM set param %d value %d\n",
vdev_id, peer_addr, param_id, param_val);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
if (ret) {
ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
u8 peer_addr[ETH_ALEN],
u32 peer_tid_bitmap,
u8 vdev_id)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_peer_flush_tids_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD,
sizeof(*cmd));
ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap);
cmd->vdev_id = cpu_to_le32(vdev_id);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
vdev_id, peer_addr, peer_tid_bitmap);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to send WMI_PEER_FLUSH_TIDS cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
int vdev_id, const u8 *addr,
dma_addr_t paddr, u8 tid,
u8 ba_window_size_valid,
u32 ba_window_size)
{
struct wmi_peer_reorder_queue_setup_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD,
sizeof(*cmd));
ether_addr_copy(cmd->peer_macaddr.addr, addr);
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->tid = cpu_to_le32(tid);
cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr));
cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr));
cmd->queue_no = cpu_to_le32(tid);
cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid);
cmd->ba_window_size = cpu_to_le32(ba_window_size);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
addr, vdev_id, tid);
ret = ath12k_wmi_cmd_send(ar->wmi, skb,
WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
dev_kfree_skb(skb);
}
return ret;
}
int
ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
struct ath12k_wmi_rx_reorder_queue_remove_arg *arg)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_peer_reorder_queue_remove_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
sizeof(*cmd));
ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr);
cmd->vdev_id = cpu_to_le32(arg->vdev_id);
cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__,
arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap);
ret = ath12k_wmi_cmd_send(wmi, skb,
WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
u32 param_value, u8 pdev_id)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_pdev_set_param_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD,
sizeof(*cmd));
cmd->pdev_id = cpu_to_le32(pdev_id);
cmd->param_id = cpu_to_le32(param_id);
cmd->param_value = cpu_to_le32(param_value);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI pdev set param %d pdev id %d value %d\n",
param_id, pdev_id, param_value);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
if (ret) {
ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_pdev_set_ps_mode_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->sta_ps_mode = cpu_to_le32(enable);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI vdev set psmode %d vdev id %d\n",
enable, vdev_id);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
if (ret) {
ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
u32 pdev_id)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_pdev_suspend_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD,
sizeof(*cmd));
cmd->suspend_opt = cpu_to_le32(suspend_opt);
cmd->pdev_id = cpu_to_le32(pdev_id);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI pdev suspend pdev_id %d\n", pdev_id);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
if (ret) {
ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_pdev_resume_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_resume_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD,
sizeof(*cmd));
cmd->pdev_id = cpu_to_le32(pdev_id);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI pdev resume pdev id %d\n", pdev_id);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
if (ret) {
ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
/* TODO: FW support for this command is not available yet.
* It can be tested once the command and the corresponding
* event are implemented in FW.
*/
int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
enum wmi_bss_chan_info_req_type type)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_pdev_bss_chan_info_req_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
sizeof(*cmd));
cmd->req_type = cpu_to_le32(type);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI bss chan info req type %d\n", type);
ret = ath12k_wmi_cmd_send(wmi, skb,
WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
struct ath12k_wmi_ap_ps_arg *arg)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_ap_ps_peer_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(arg->vdev_id);
ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
cmd->param = cpu_to_le32(arg->param);
cmd->value = cpu_to_le32(arg->value);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI set ap ps vdev id %d peer %pM param %d value %d\n",
arg->vdev_id, peer_addr, arg->param, arg->value);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
u32 param, u32 param_value)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_sta_powersave_param_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->param = cpu_to_le32(param);
cmd->value = cpu_to_le32(param_value);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI set sta ps vdev_id %d param %d value %d\n",
vdev_id, param, param_value);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
if (ret) {
ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID");
dev_kfree_skb(skb);
}
return ret;
}
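/* Request that the firmware simulate a hang/crash of the given type after
* delay_time_ms. Intended as a debug hook (e.g. to exercise the recovery
* path); the exact semantics of @type are firmware-defined.
*/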
int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_force_fw_hang_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD,
len);
cmd->type = cpu_to_le32(type);
cmd->delay_time_ms = cpu_to_le32(delay_time_ms);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
if (ret) {
ath12k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id,
u32 param_id, u32 param_value)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_vdev_set_param_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->param_id = cpu_to_le32(param_id);
cmd->param_value = cpu_to_le32(param_value);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI vdev id 0x%x set param %d value %d\n",
vdev_id, param_id, param_value);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to send WMI_VDEV_SET_PARAM_CMDID\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_get_pdev_temperature_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
sizeof(*cmd));
cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
if (ret) {
ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
u32 vdev_id, u32 bcn_ctrl_op)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_bcn_offload_ctrl_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
vdev_id, bcn_ctrl_op);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
struct sk_buff *bcn)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_bcn_tmpl_cmd *cmd;
struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
struct wmi_tlv *tlv;
struct sk_buff *skb;
void *ptr;
int ret, len;
size_t aligned_len = roundup(bcn->len, 4);
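/* Message layout: the fixed wmi_bcn_tmpl_cmd TLV, a (currently empty)
* bcn_prb_info TLV, then a byte-array TLV carrying the beacon template
* padded to a 4-byte boundary.
*/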
len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);
cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]);
cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]);
cmd->buf_len = cpu_to_le32(bcn->len);
ptr = skb->data + sizeof(*cmd);
bcn_prb_info = ptr;
len = sizeof(*bcn_prb_info);
bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
len);
bcn_prb_info->caps = 0;
bcn_prb_info->erp = 0;
ptr += sizeof(*bcn_prb_info);
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
memcpy(tlv->value, bcn->data, bcn->len);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
if (ret) {
ath12k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_vdev_install_key(struct ath12k *ar,
struct wmi_vdev_install_key_arg *arg)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_vdev_install_key_cmd *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
int ret, len, key_len_aligned;
/* The WMI_TAG_ARRAY_BYTE TLV must be padded to a 4-byte boundary;
* the actual key length is carried in cmd->key_len.
*/
key_len_aligned = roundup(arg->key_len, 4);
len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(arg->vdev_id);
ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
cmd->key_idx = cpu_to_le32(arg->key_idx);
cmd->key_flags = cpu_to_le32(arg->key_flags);
cmd->key_cipher = cpu_to_le32(arg->key_cipher);
cmd->key_len = cpu_to_le32(arg->key_len);
cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len);
cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len);
if (arg->key_rsc_counter)
cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter);
tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned);
memcpy(tlv->value, arg->key_data, arg->key_len);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI vdev install key idx %d cipher %d len %d\n",
arg->key_idx, arg->key_cipher, arg->key_len);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to send WMI_VDEV_INSTALL_KEY cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
struct ath12k_wmi_peer_assoc_arg *arg,
bool hw_crypto_disabled)
{
cmd->peer_flags = 0;
cmd->peer_flags_ext = 0;
if (arg->is_wme_set) {
if (arg->qos_flag)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS);
if (arg->apsd_flag)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD);
if (arg->ht_flag)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT);
if (arg->bw_40)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ);
if (arg->bw_80)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ);
if (arg->bw_160)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
if (arg->bw_320)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
/* Typically if STBC is enabled for VHT it should be enabled
* for HT as well.
*/
if (arg->stbc_flag)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC);
/* Typically if LDPC is enabled for VHT it should be enabled
* for HT as well.
*/
if (arg->ldpc_flag)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC);
if (arg->static_mimops_flag)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS);
if (arg->dynamic_mimops_flag)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS);
if (arg->spatial_mux_flag)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX);
if (arg->vht_flag)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT);
if (arg->he_flag)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE);
if (arg->twt_requester)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ);
if (arg->twt_responder)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP);
if (arg->eht_flag)
cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_EHT);
}
/* Suppress authorization for all AUTH modes that need 4-way handshake
* (during re-association).
* Authorization will be done for these modes on key installation.
*/
if (arg->auth_flag)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH);
if (arg->need_ptk_4_way) {
cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY);
if (!hw_crypto_disabled)
cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH);
}
if (arg->need_gtk_2_way)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY);
/* safe mode bypass the 4-way handshake */
if (arg->safe_mode_enabled)
cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY |
WMI_PEER_NEED_GTK_2_WAY));
if (arg->is_pmf_enabled)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF);
/* Disable AMSDU for station transmit, if user configures it */
/* Disable AMSDU for AP transmit to 11n stations, if user configures it.
* if (arg->amsdu_disable) Add after FW support
*/
/* Target asserts if the node is marked HT while all MCS rates are set
* to 0. Mark the node as non-HT if all the MCS rates are disabled
* through iwpriv.
*/
if (arg->peer_ht_rates.num_rates == 0)
cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT);
}
int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
struct ath12k_wmi_peer_assoc_arg *arg)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_peer_assoc_complete_cmd *cmd;
struct ath12k_wmi_vht_rate_set_params *mcs;
struct ath12k_wmi_he_rate_set_params *he_mcs;
struct ath12k_wmi_eht_rate_set_params *eht_mcs;
struct sk_buff *skb;
struct wmi_tlv *tlv;
void *ptr;
u32 peer_legacy_rates_align;
u32 peer_ht_rates_align;
int i, ret, len;
peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
sizeof(u32));
peer_ht_rates_align = roundup(arg->peer_ht_rates.num_rates,
sizeof(u32));
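/* The peer assoc message is the fixed command TLV followed, in order, by
* a byte array of legacy rates, a byte array of HT rates, a VHT rate-set
* struct, an array of HE rate-set structs, a zero-length MLO header
* array, an array of EHT rate-set structs and a zero-length ML partner
* link array; the two trailing TLV_HDR_SIZE terms below account for the
* zero-length arrays.
*/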
len = sizeof(*cmd) +
TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
sizeof(*mcs) + TLV_HDR_SIZE +
(sizeof(*he_mcs) * arg->peer_he_mcs_count) +
TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count) +
TLV_HDR_SIZE + TLV_HDR_SIZE;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
ptr = skb->data;
cmd = ptr;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(arg->vdev_id);
cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc);
cmd->peer_associd = cpu_to_le32(arg->peer_associd);
cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
ath12k_wmi_copy_peer_flags(cmd, arg,
test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED,
&ar->ab->dev_flags));
ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac);
cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps);
cmd->peer_caps = cpu_to_le32(arg->peer_caps);
cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval);
cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps);
cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu);
cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density);
cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps);
cmd->peer_phymode = cpu_to_le32(arg->peer_phymode);
/* Update 11ax capabilities */
cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]);
cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]);
cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal);
cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz);
cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops);
for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
cmd->peer_he_cap_phy[i] =
cpu_to_le32(arg->peer_he_cap_phyinfo[i]);
cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1);
cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask);
for (i = 0; i < WMI_MAX_NUM_SS; i++)
cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] =
cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]);
/* Update 11be capabilities */
memcpy_and_pad(cmd->peer_eht_cap_mac, sizeof(cmd->peer_eht_cap_mac),
arg->peer_eht_cap_mac, sizeof(arg->peer_eht_cap_mac),
0);
memcpy_and_pad(cmd->peer_eht_cap_phy, sizeof(cmd->peer_eht_cap_phy),
arg->peer_eht_cap_phy, sizeof(arg->peer_eht_cap_phy),
0);
memcpy_and_pad(&cmd->peer_eht_ppet, sizeof(cmd->peer_eht_ppet),
&arg->peer_eht_ppet, sizeof(arg->peer_eht_ppet), 0);
/* Update peer legacy rate information */
ptr += sizeof(*cmd);
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align);
ptr += TLV_HDR_SIZE;
cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates);
memcpy(ptr, arg->peer_legacy_rates.rates,
arg->peer_legacy_rates.num_rates);
/* Update peer HT rate information */
ptr += peer_legacy_rates_align;
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align);
ptr += TLV_HDR_SIZE;
cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates);
memcpy(ptr, arg->peer_ht_rates.rates,
arg->peer_ht_rates.num_rates);
/* VHT Rates */
ptr += peer_ht_rates_align;
mcs = ptr;
mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET,
sizeof(*mcs));
cmd->peer_nss = cpu_to_le32(arg->peer_nss);
/* Update bandwidth-NSS mapping */
cmd->peer_bw_rxnss_override = 0;
cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override);
if (arg->vht_capable) {
mcs->rx_max_rate = cpu_to_le32(arg->rx_max_rate);
mcs->rx_mcs_set = cpu_to_le32(arg->rx_mcs_set);
mcs->tx_max_rate = cpu_to_le32(arg->tx_max_rate);
mcs->tx_mcs_set = cpu_to_le32(arg->tx_mcs_set);
}
/* HE Rates */
cmd->peer_he_mcs = cpu_to_le32(arg->peer_he_mcs_count);
cmd->min_data_rate = cpu_to_le32(arg->min_data_rate);
ptr += sizeof(*mcs);
len = arg->peer_he_mcs_count * sizeof(*he_mcs);
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
ptr += TLV_HDR_SIZE;
/* Loop through the HE rate set */
for (i = 0; i < arg->peer_he_mcs_count; i++) {
he_mcs = ptr;
he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
sizeof(*he_mcs));
he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
ptr += sizeof(*he_mcs);
}
/* MLO header tag with 0 length */
len = 0;
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
ptr += TLV_HDR_SIZE;
/* Loop through the EHT rate set */
len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
ptr += TLV_HDR_SIZE;
for (i = 0; i < arg->peer_eht_mcs_count; i++) {
eht_mcs = ptr;
eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
sizeof(*eht_mcs));
eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]);
eht_mcs->tx_mcs_set = cpu_to_le32(arg->peer_eht_tx_mcs_set[i]);
ptr += sizeof(*eht_mcs);
}
/* ML partner links tag with 0 length */
len = 0;
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
ptr += TLV_HDR_SIZE;
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n",
cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
cmd->peer_listen_intval, cmd->peer_ht_caps,
cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
cmd->peer_mpdu_density,
cmd->peer_vht_caps, cmd->peer_he_cap_info,
cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
cmd->peer_he_cap_phy[2],
cmd->peer_bw_rxnss_override, cmd->peer_flags_ext,
cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1],
cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1],
cmd->peer_eht_cap_phy[2]);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to send WMI_PEER_ASSOC_CMDID\n");
dev_kfree_skb(skb);
}
return ret;
}
void ath12k_wmi_start_scan_init(struct ath12k *ar,
struct ath12k_wmi_scan_req_arg *arg)
{
/* setup commonly used values */
arg->scan_req_id = 1;
arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
arg->dwell_time_active = 50;
arg->dwell_time_active_2g = 0;
arg->dwell_time_passive = 150;
arg->dwell_time_active_6g = 40;
arg->dwell_time_passive_6g = 30;
arg->min_rest_time = 50;
arg->max_rest_time = 500;
arg->repeat_probe_time = 0;
arg->probe_spacing_time = 0;
arg->idle_time = 0;
arg->max_scan_time = 20000;
arg->probe_delay = 5;
arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
WMI_SCAN_EVENT_COMPLETED |
WMI_SCAN_EVENT_BSS_CHANNEL |
WMI_SCAN_EVENT_FOREIGN_CHAN |
WMI_SCAN_EVENT_DEQUEUED;
arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
arg->num_bssid = 1;
/* Fill bssid_list[0] with 0xff; otherwise the BSSID and RA would be
* all-zeros in the probe request.
*/
eth_broadcast_addr(arg->bssid_list[0].addr);
}
static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
struct ath12k_wmi_scan_req_arg *arg)
{
/* Scan events subscription */
if (arg->scan_ev_started)
cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED);
if (arg->scan_ev_completed)
cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED);
if (arg->scan_ev_bss_chan)
cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL);
if (arg->scan_ev_foreign_chan)
cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN);
if (arg->scan_ev_dequeued)
cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED);
if (arg->scan_ev_preempted)
cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED);
if (arg->scan_ev_start_failed)
cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED);
if (arg->scan_ev_restarted)
cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED);
if (arg->scan_ev_foreign_chn_exit)
cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT);
if (arg->scan_ev_suspended)
cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED);
if (arg->scan_ev_resumed)
cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED);
/* Set scan control flags */
cmd->scan_ctrl_flags = 0;
if (arg->scan_f_passive)
cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE);
if (arg->scan_f_strict_passive_pch)
cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN);
if (arg->scan_f_promisc_mode)
cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS);
if (arg->scan_f_capture_phy_err)
cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR);
if (arg->scan_f_half_rate)
cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT);
if (arg->scan_f_quarter_rate)
cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT);
if (arg->scan_f_cck_rates)
cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES);
if (arg->scan_f_ofdm_rates)
cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES);
if (arg->scan_f_chan_stat_evnt)
cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT);
if (arg->scan_f_filter_prb_req)
cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
if (arg->scan_f_bcast_probe)
cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ);
if (arg->scan_f_offchan_mgmt_tx)
cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX);
if (arg->scan_f_offchan_data_tx)
cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX);
if (arg->scan_f_force_active_dfs_chn)
cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS);
if (arg->scan_f_add_tpc_ie_in_probe)
cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ);
if (arg->scan_f_add_ds_ie_in_probe)
cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ);
if (arg->scan_f_add_spoofed_mac_in_probe)
cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ);
if (arg->scan_f_add_rand_seq_in_probe)
cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ);
if (arg->scan_f_en_ie_whitelist_in_probe)
cmd->scan_ctrl_flags |=
cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ);
cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode,
WMI_SCAN_DWELL_MODE_MASK);
}
int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
struct ath12k_wmi_scan_req_arg *arg)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_start_scan_cmd *cmd;
struct ath12k_wmi_ssid_params *ssid = NULL;
struct ath12k_wmi_mac_addr_params *bssid;
struct sk_buff *skb;
struct wmi_tlv *tlv;
void *ptr;
int i, ret, len;
u32 *tmp_ptr, extraie_len_with_pad = 0;
struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL;
struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL;
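/* Account for every TLV in the message up front. The channel, SSID,
* BSSID and extra-IE arrays are emitted even when empty, so their TLV
* headers are always counted; the short-SSID and BSSID hint arrays are
* only present when non-empty.
*/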
len = sizeof(*cmd);
len += TLV_HDR_SIZE;
if (arg->num_chan)
len += arg->num_chan * sizeof(u32);
len += TLV_HDR_SIZE;
if (arg->num_ssids)
len += arg->num_ssids * sizeof(*ssid);
len += TLV_HDR_SIZE;
if (arg->num_bssid)
len += sizeof(*bssid) * arg->num_bssid;
if (arg->num_hint_bssid)
len += TLV_HDR_SIZE +
arg->num_hint_bssid * sizeof(*hint_bssid);
if (arg->num_hint_s_ssid)
len += TLV_HDR_SIZE +
arg->num_hint_s_ssid * sizeof(*s_ssid);
len += TLV_HDR_SIZE;
if (arg->extraie.len)
extraie_len_with_pad =
roundup(arg->extraie.len, sizeof(u32));
if (extraie_len_with_pad <= (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len)) {
len += extraie_len_with_pad;
} else {
ath12k_warn(ar->ab, "discard large size %d bytes extraie for scan start\n",
arg->extraie.len);
extraie_len_with_pad = 0;
}
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
ptr = skb->data;
cmd = ptr;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD,
sizeof(*cmd));
cmd->scan_id = cpu_to_le32(arg->scan_id);
cmd->scan_req_id = cpu_to_le32(arg->scan_req_id);
cmd->vdev_id = cpu_to_le32(arg->vdev_id);
cmd->scan_priority = cpu_to_le32(arg->scan_priority);
cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);
ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg);
cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active);
cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g);
cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive);
cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g);
cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g);
cmd->min_rest_time = cpu_to_le32(arg->min_rest_time);
cmd->max_rest_time = cpu_to_le32(arg->max_rest_time);
cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time);
cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time);
cmd->idle_time = cpu_to_le32(arg->idle_time);
cmd->max_scan_time = cpu_to_le32(arg->max_scan_time);
cmd->probe_delay = cpu_to_le32(arg->probe_delay);
cmd->burst_duration = cpu_to_le32(arg->burst_duration);
cmd->num_chan = cpu_to_le32(arg->num_chan);
cmd->num_bssid = cpu_to_le32(arg->num_bssid);
cmd->num_ssids = cpu_to_le32(arg->num_ssids);
cmd->ie_len = cpu_to_le32(arg->extraie.len);
cmd->n_probes = cpu_to_le32(arg->n_probes);
ptr += sizeof(*cmd);
len = arg->num_chan * sizeof(u32);
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len);
ptr += TLV_HDR_SIZE;
tmp_ptr = (u32 *)ptr;
memcpy(tmp_ptr, arg->chan_list, len);
ptr += len;
len = arg->num_ssids * sizeof(*ssid);
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
ptr += TLV_HDR_SIZE;
if (arg->num_ssids) {
ssid = ptr;
for (i = 0; i < arg->num_ssids; ++i) {
ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len);
memcpy(ssid->ssid, arg->ssid[i].ssid,
arg->ssid[i].ssid_len);
ssid++;
}
}
ptr += (arg->num_ssids * sizeof(*ssid));
len = arg->num_bssid * sizeof(*bssid);
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
ptr += TLV_HDR_SIZE;
bssid = ptr;
if (arg->num_bssid) {
for (i = 0; i < arg->num_bssid; ++i) {
ether_addr_copy(bssid->addr,
arg->bssid_list[i].addr);
bssid++;
}
}
ptr += arg->num_bssid * sizeof(*bssid);
len = extraie_len_with_pad;
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len);
ptr += TLV_HDR_SIZE;
if (extraie_len_with_pad)
memcpy(ptr, arg->extraie.ptr,
arg->extraie.len);
ptr += extraie_len_with_pad;
if (arg->num_hint_s_ssid) {
len = arg->num_hint_s_ssid * sizeof(*s_ssid);
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
ptr += TLV_HDR_SIZE;
s_ssid = ptr;
for (i = 0; i < arg->num_hint_s_ssid; ++i) {
s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags;
s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid;
s_ssid++;
}
ptr += len;
}
if (arg->num_hint_bssid) {
len = arg->num_hint_bssid * sizeof(struct ath12k_wmi_hint_bssid_arg);
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
ptr += TLV_HDR_SIZE;
hint_bssid = ptr;
for (i = 0; i < arg->num_hint_bssid; ++i) {
hint_bssid->freq_flags =
arg->hint_bssid[i].freq_flags;
ether_addr_copy(&hint_bssid->bssid.addr[0],
&arg->hint_bssid[i].bssid.addr[0]);
hint_bssid++;
}
}
ret = ath12k_wmi_cmd_send(wmi, skb,
WMI_START_SCAN_CMDID);
if (ret) {
ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
struct ath12k_wmi_scan_cancel_arg *arg)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_stop_scan_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_stop_scan_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(arg->vdev_id);
cmd->requestor = cpu_to_le32(arg->requester);
cmd->scan_id = cpu_to_le32(arg->scan_id);
cmd->pdev_id = cpu_to_le32(arg->pdev_id);
/* stop the scan with the corresponding scan_id */
if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
/* Cancelling all scans */
cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL);
} else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
/* Cancelling VAP scans */
cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL);
} else if (arg->req_type == WLAN_SCAN_CANCEL_SINGLE) {
/* Cancelling specific scan */
cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ONE);
} else {
ath12k_warn(ar->ab, "invalid scan cancel req_type %d",
arg->req_type);
dev_kfree_skb(skb);
return -EINVAL;
}
ret = ath12k_wmi_cmd_send(wmi, skb,
WMI_STOP_SCAN_CMDID);
if (ret) {
ath12k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
struct ath12k_wmi_scan_chan_list_arg *arg)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_scan_chan_list_cmd *cmd;
struct sk_buff *skb;
struct ath12k_wmi_channel_params *chan_info;
struct ath12k_wmi_channel_arg *channel_arg;
struct wmi_tlv *tlv;
void *ptr;
int i, ret, len;
u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
__le32 *reg1, *reg2;
channel_arg = &arg->channel[0];
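/* The full channel list may not fit into a single WMI message; send it
* in chunks bounded by max_msg_len and flag every chunk after the first
* as an append to the list already downloaded.
*/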
while (arg->nallchans) {
len = sizeof(*cmd) + TLV_HDR_SIZE;
max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
sizeof(*chan_info);
num_send_chans = min(arg->nallchans, max_chan_limit);
arg->nallchans -= num_send_chans;
len += sizeof(*chan_info) * num_send_chans;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD,
sizeof(*cmd));
cmd->pdev_id = cpu_to_le32(arg->pdev_id);
cmd->num_scan_chans = cpu_to_le32(num_send_chans);
if (num_sends)
cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
num_send_chans, len, arg->pdev_id, num_sends);
ptr = skb->data + sizeof(*cmd);
len = sizeof(*chan_info) * num_send_chans;
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
ptr += TLV_HDR_SIZE;
for (i = 0; i < num_send_chans; ++i) {
chan_info = ptr;
memset(chan_info, 0, sizeof(*chan_info));
len = sizeof(*chan_info);
chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
len);
reg1 = &chan_info->reg_info_1;
reg2 = &chan_info->reg_info_2;
chan_info->mhz = cpu_to_le32(channel_arg->mhz);
chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1);
chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2);
if (channel_arg->is_chan_passive)
chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
if (channel_arg->allow_he)
chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
else if (channel_arg->allow_vht)
chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
else if (channel_arg->allow_ht)
chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
if (channel_arg->half_rate)
chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE);
if (channel_arg->quarter_rate)
chan_info->info |=
cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE);
if (channel_arg->psc_channel)
chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC);
if (channel_arg->dfs_set)
chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
chan_info->info |= le32_encode_bits(channel_arg->phy_mode,
WMI_CHAN_INFO_MODE);
*reg1 |= le32_encode_bits(channel_arg->minpower,
WMI_CHAN_REG_INFO1_MIN_PWR);
*reg1 |= le32_encode_bits(channel_arg->maxpower,
WMI_CHAN_REG_INFO1_MAX_PWR);
*reg1 |= le32_encode_bits(channel_arg->maxregpower,
WMI_CHAN_REG_INFO1_MAX_REG_PWR);
*reg1 |= le32_encode_bits(channel_arg->reg_class_id,
WMI_CHAN_REG_INFO1_REG_CLS);
*reg2 |= le32_encode_bits(channel_arg->antennamax,
WMI_CHAN_REG_INFO2_ANT_MAX);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
i, chan_info->mhz, chan_info->info);
ptr += sizeof(*chan_info);
channel_arg++;
}
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
if (ret) {
ath12k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
dev_kfree_skb(skb);
return ret;
}
num_sends++;
}
return 0;
}
int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id,
struct wmi_wmm_params_all_arg *param)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_vdev_set_wmm_params_cmd *cmd;
struct wmi_wmm_params *wmm_param;
struct wmi_wmm_params_arg *wmi_wmm_arg;
struct sk_buff *skb;
int ret, ac;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->wmm_param_type = 0;
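/* Fill one wmi_wmm_params struct per access category, in the fixed
* order BE, BK, VI, VO.
*/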
for (ac = 0; ac < WME_NUM_AC; ac++) {
switch (ac) {
case WME_AC_BE:
wmi_wmm_arg = &param->ac_be;
break;
case WME_AC_BK:
wmi_wmm_arg = &param->ac_bk;
break;
case WME_AC_VI:
wmi_wmm_arg = &param->ac_vi;
break;
case WME_AC_VO:
wmi_wmm_arg = &param->ac_vo;
break;
}
wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
wmm_param->tlv_header =
ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
sizeof(*wmm_param));
wmm_param->aifs = cpu_to_le32(wmi_wmm_arg->aifs);
wmm_param->cwmin = cpu_to_le32(wmi_wmm_arg->cwmin);
wmm_param->cwmax = cpu_to_le32(wmi_wmm_arg->cwmax);
wmm_param->txoplimit = cpu_to_le32(wmi_wmm_arg->txop);
wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm);
wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
ac, wmm_param->aifs, wmm_param->cwmin,
wmm_param->cwmax, wmm_param->txoplimit,
wmm_param->acm, wmm_param->no_ack);
}
ret = ath12k_wmi_cmd_send(wmi, skb,
WMI_VDEV_SET_WMM_PARAMS_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
u32 pdev_id)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_dfs_phyerr_offload_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
cmd->tlv_header =
ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
sizeof(*cmd));
cmd->pdev_id = cpu_to_le32(pdev_id);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI dfs phy err offload enable pdev id %d\n", pdev_id);
ret = ath12k_wmi_cmd_send(wmi, skb,
WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 initiator, u32 reason)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_delba_send_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_delba_send_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
ether_addr_copy(cmd->peer_macaddr.addr, mac);
cmd->tid = cpu_to_le32(tid);
cmd->initiator = cpu_to_le32(initiator);
cmd->reasoncode = cpu_to_le32(reason);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
vdev_id, mac, tid, initiator, reason);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to send WMI_DELBA_SEND_CMDID cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 status)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_addba_setresponse_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
cmd->tlv_header =
ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
ether_addr_copy(cmd->peer_macaddr.addr, mac);
cmd->tid = cpu_to_le32(tid);
cmd->statuscode = cpu_to_le32(status);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
vdev_id, mac, tid, status);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 buf_size)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_addba_send_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_addba_send_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
ether_addr_copy(cmd->peer_macaddr.addr, mac);
cmd->tid = cpu_to_le32(tid);
cmd->buffersize = cpu_to_le32(buf_size);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
vdev_id, mac, tid, buf_size);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to send WMI_ADDBA_SEND_CMDID cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_addba_clear_resp_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
cmd->tlv_header =
ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
ether_addr_copy(cmd->peer_macaddr.addr, mac);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
vdev_id, mac);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
struct ath12k_wmi_init_country_arg *arg)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_init_country_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_init_country_cmd *)skb->data;
cmd->tlv_header =
ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD,
sizeof(*cmd));
cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
switch (arg->flags) {
case ALPHA_IS_SET:
cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_ALPHA);
memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3);
break;
case CC_IS_SET:
cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE);
cmd->cc_info.country_code =
cpu_to_le32(arg->cc_info.country_code);
break;
case REGDMN_IS_SET:
cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN);
cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id);
break;
default:
ret = -EINVAL;
goto out;
}
ret = ath12k_wmi_cmd_send(wmi, skb,
WMI_SET_INIT_COUNTRY_CMDID);
out:
if (ret) {
ath12k_warn(ar->ab,
"failed to send WMI_SET_INIT_COUNTRY CMD :%d\n",
ret);
dev_kfree_skb(skb);
}
return ret;
}
int
ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct ath12k_base *ab = wmi->wmi_ab->ab;
struct wmi_twt_enable_params_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD,
len);
cmd->pdev_id = cpu_to_le32(pdev_id);
cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS);
cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE);
cmd->congestion_thresh_setup =
cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP);
cmd->congestion_thresh_teardown =
cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN);
cmd->congestion_thresh_critical =
cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL);
cmd->interference_thresh_teardown =
cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN);
cmd->interference_thresh_setup =
cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP);
cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP);
cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN);
cmd->no_of_bcast_mcast_slots =
cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS);
cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS);
cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT);
cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL);
cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL);
cmd->remove_sta_slot_interval =
cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL);
/* TODO add MBSSID support */
cmd->mbss_support = 0;
ret = ath12k_wmi_cmd_send(wmi, skb,
WMI_TWT_ENABLE_CMDID);
if (ret) {
ath12k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
dev_kfree_skb(skb);
}
return ret;
}
int
ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct ath12k_base *ab = wmi->wmi_ab->ab;
struct wmi_twt_disable_params_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD,
len);
cmd->pdev_id = cpu_to_le32(pdev_id);
ret = ath12k_wmi_cmd_send(wmi, skb,
WMI_TWT_DISABLE_CMDID);
if (ret) {
ath12k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
dev_kfree_skb(skb);
}
return ret;
}
int
ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
struct ieee80211_he_obss_pd *he_obss_pd)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct ath12k_base *ab = wmi->wmi_ab->ab;
struct wmi_obss_spatial_reuse_params_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
len);
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->enable = cpu_to_le32(he_obss_pd->enable);
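/* The OBSS PD min/max offsets are signed, hence the a_cpu_to_sle32()
* (signed little-endian) conversions below.
*/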
cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset);
cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset);
ret = ath12k_wmi_cmd_send(wmi, skb,
WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
if (ret) {
ath12k_warn(ab,
"Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
u8 bss_color, u32 period,
bool enable)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct ath12k_base *ab = wmi->wmi_ab->ab;
struct wmi_obss_color_collision_cfg_params_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
len);
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) :
cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE);
cmd->current_bss_color = cpu_to_le32(bss_color);
cmd->detection_period_ms = cpu_to_le32(period);
cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
cmd->free_slot_expiry_time_ms = 0;
cmd->flags = 0;
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
cmd->vdev_id, cmd->evt_type, cmd->current_bss_color,
cmd->detection_period_ms, cmd->scan_period_ms);
ret = ath12k_wmi_cmd_send(wmi, skb,
WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
if (ret) {
ath12k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
bool enable)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct ath12k_base *ab = wmi->wmi_ab->ab;
struct wmi_bss_color_change_enable_params_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
len);
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->enable = enable ? cpu_to_le32(1) : 0;
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"wmi_send_bss_color_change_enable id %d enable %d\n",
cmd->vdev_id, cmd->enable);
ret = ath12k_wmi_cmd_send(wmi, skb,
WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
if (ret) {
ath12k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID");
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
struct sk_buff *tmpl)
{
struct wmi_tlv *tlv;
struct sk_buff *skb;
void *ptr;
int ret, len;
size_t aligned_len;
struct wmi_fils_discovery_tmpl_cmd *cmd;
aligned_len = roundup(tmpl->len, 4);
len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI vdev %i set FILS discovery template\n", vdev_id);
skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->buf_len = cpu_to_le32(tmpl->len);
ptr = skb->data + sizeof(*cmd);
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
memcpy(tlv->value, tmpl->data, tmpl->len);
ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"WMI vdev %i failed to send FILS discovery template command\n",
vdev_id);
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
struct sk_buff *tmpl)
{
struct wmi_probe_tmpl_cmd *cmd;
struct ath12k_wmi_bcn_prb_info_params *probe_info;
struct wmi_tlv *tlv;
struct sk_buff *skb;
void *ptr;
int ret, len;
size_t aligned_len = roundup(tmpl->len, 4);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI vdev %i set probe response template\n", vdev_id);
len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;
skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->buf_len = cpu_to_le32(tmpl->len);
ptr = skb->data + sizeof(*cmd);
probe_info = ptr;
len = sizeof(*probe_info);
probe_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
len);
probe_info->caps = 0;
probe_info->erp = 0;
ptr += sizeof(*probe_info);
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
memcpy(tlv->value, tmpl->data, tmpl->len);
ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"WMI vdev %i failed to send probe response template command\n",
vdev_id);
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval,
bool unsol_bcast_probe_resp_enabled)
{
struct sk_buff *skb;
int ret, len;
struct wmi_fils_discovery_cmd *cmd;
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI vdev %i set %s interval to %u TU\n",
vdev_id, unsol_bcast_probe_resp_enabled ?
"unsolicited broadcast probe response" : "FILS discovery",
interval);
len = sizeof(*cmd);
skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_fils_discovery_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD,
len);
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->interval = cpu_to_le32(interval);
cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled);
ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"WMI vdev %i failed to send FILS discovery enable/disable command\n",
vdev_id);
dev_kfree_skb(skb);
}
return ret;
}
static void
ath12k_fill_band_to_mac_param(struct ath12k_base *soc,
struct ath12k_wmi_pdev_band_arg *arg)
{
u8 i;
struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap;
struct ath12k_pdev *pdev;
for (i = 0; i < soc->num_radios; i++) {
pdev = &soc->pdevs[i];
hal_reg_cap = &soc->hal_reg_cap[i];
arg[i].pdev_id = pdev->pdev_id;
switch (pdev->cap.supported_bands) {
case WMI_HOST_WLAN_2G_5G_CAP:
arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
break;
case WMI_HOST_WLAN_2G_CAP:
arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
arg[i].end_freq = hal_reg_cap->high_2ghz_chan;
break;
case WMI_HOST_WLAN_5G_CAP:
arg[i].start_freq = hal_reg_cap->low_5ghz_chan;
arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
break;
default:
break;
}
}
}
static void
ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cfg,
struct ath12k_wmi_resource_config_arg *tg_cfg)
{
wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
wmi_cfg->num_peers = cpu_to_le32(tg_cfg->num_peers);
wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers);
wmi_cfg->num_offload_reorder_buffs =
cpu_to_le32(tg_cfg->num_offload_reorder_buffs);
wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys);
wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids);
wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit);
wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask);
wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask);
wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]);
wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]);
wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]);
wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]);
wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode);
wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req);
wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev);
wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev);
wmi_cfg->roam_offload_max_ap_profiles =
cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles);
wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups);
wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems);
wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode);
wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size);
wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries);
wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size);
wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim);
wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check);
wmi_cfg->vow_config = cpu_to_le32(tg_cfg->vow_config);
wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev);
wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc);
wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries);
wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs);
wmi_cfg->num_tdls_conn_table_entries =
cpu_to_le32(tg_cfg->num_tdls_conn_table_entries);
wmi_cfg->beacon_tx_offload_max_vdev =
cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev);
wmi_cfg->num_multicast_filter_entries =
cpu_to_le32(tg_cfg->num_multicast_filter_entries);
wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters);
wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern);
wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size);
wmi_cfg->max_tdls_concurrent_sleep_sta =
cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta);
wmi_cfg->max_tdls_concurrent_buffer_sta =
cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta);
wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate);
wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs);
wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels);
wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules);
wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config);
wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
}
static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
struct ath12k_wmi_init_cmd_arg *arg)
{
struct ath12k_base *ab = wmi->wmi_ab->ab;
struct sk_buff *skb;
struct wmi_init_cmd *cmd;
struct ath12k_wmi_resource_config_params *cfg;
struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode;
struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac;
struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks;
struct wmi_tlv *tlv;
size_t len;
int ret;
void *ptr;
u32 hw_mode_len = 0;
u16 idx;
if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX)
hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
(arg->num_band_to_mac * sizeof(*band_to_mac));
len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
(arg->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_init_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD,
sizeof(*cmd));
ptr = skb->data + sizeof(*cmd);
cfg = ptr;
ath12k_wmi_copy_resource_config(cfg, &arg->res_cfg);
cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
sizeof(*cfg));
ptr += sizeof(*cfg);
host_mem_chunks = ptr + TLV_HDR_SIZE;
len = sizeof(struct ath12k_wmi_host_mem_chunk_params);
for (idx = 0; idx < arg->num_mem_chunks; ++idx) {
host_mem_chunks[idx].tlv_header =
ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
len);
host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr);
host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len);
host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id);
ath12k_dbg(ab, ATH12K_DBG_WMI,
"WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
arg->mem_chunks[idx].req_id,
(u64)arg->mem_chunks[idx].paddr,
arg->mem_chunks[idx].len);
}
cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks);
len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks;
/* Array TLV header for the host memory chunks; emitted even when
 * num_mem_chunks is zero.
 */
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
ptr += TLV_HDR_SIZE + len;
if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr;
hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
sizeof(*hw_mode));
hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id);
hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac);
ptr += sizeof(*hw_mode);
len = arg->num_band_to_mac * sizeof(*band_to_mac);
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
ptr += TLV_HDR_SIZE;
len = sizeof(*band_to_mac);
for (idx = 0; idx < arg->num_band_to_mac; idx++) {
band_to_mac = (void *)ptr;
band_to_mac->tlv_header =
ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC,
len);
band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id);
band_to_mac->start_freq =
cpu_to_le32(arg->band_to_mac[idx].start_freq);
band_to_mac->end_freq =
cpu_to_le32(arg->band_to_mac[idx].end_freq);
ptr += sizeof(*band_to_mac);
}
}
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
if (ret) {
ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n");
dev_kfree_skb(skb);
}
return ret;
}
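/* Configure large receive offload (LRO) for a pdev. The th_4/th_6 fields
 * are seeded with random bytes; judging by the field names these are
 * presumably the Toeplitz hash keys for IPv4 and IPv6 flows.
 */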
int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar,
int pdev_id)
{
struct ath12k_wmi_pdev_lro_config_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD,
sizeof(*cmd));
get_random_bytes(cmd->th_4, sizeof(cmd->th_4));
get_random_bytes(cmd->th_6, sizeof(cmd->th_6));
cmd->pdev_id = cpu_to_le32(pdev_id);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);
ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to send lro cfg req wmi cmd\n");
goto err;
}
return 0;
err:
dev_kfree_skb(skb);
return ret;
}
int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab)
{
unsigned long time_left;
time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
WMI_SERVICE_READY_TIMEOUT_HZ);
if (!time_left)
return -ETIMEDOUT;
return 0;
}
int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab)
{
unsigned long time_left;
time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
WMI_SERVICE_READY_TIMEOUT_HZ);
if (!time_left)
return -ETIMEDOUT;
return 0;
}
int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
enum wmi_host_hw_mode_config_type mode)
{
struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd;
struct sk_buff *skb;
struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
int len;
int ret;
len = sizeof(*cmd);
skb = ath12k_wmi_alloc_skb(wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
sizeof(*cmd));
cmd->pdev_id = WMI_PDEV_ID_SOC;
cmd->hw_mode_index = cpu_to_le32(mode);
ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
if (ret) {
ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
dev_kfree_skb(skb);
}
return ret;
}
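/* Assemble the init command arguments from the hardware parameters and
 * the previously negotiated memory chunks. WMI_HOST_HW_MODE_MAX doubles
 * as a sentinel meaning "do not send a hw-mode TLV" and is forced for
 * single_pdev_only targets.
 */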
int ath12k_wmi_cmd_init(struct ath12k_base *ab)
{
struct ath12k_wmi_base *wmi_sc = &ab->wmi_ab;
struct ath12k_wmi_init_cmd_arg arg = {};
if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
ab->wmi_ab.svc_map))
arg.res_cfg.is_reg_cc_ext_event_supported = true;
ab->hw_params->wmi_init(ab, &arg.res_cfg);
arg.num_mem_chunks = wmi_sc->num_mem_chunks;
arg.hw_mode_id = wmi_sc->preferred_hw_mode;
arg.mem_chunks = wmi_sc->mem_chunks;
if (ab->hw_params->single_pdev_only)
arg.hw_mode_id = WMI_HOST_HW_MODE_MAX;
arg.num_band_to_mac = ab->num_radios;
ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);
return ath12k_init_cmd_send(&wmi_sc->wmi[0], &arg);
}
int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
struct ath12k_wmi_vdev_spectral_conf_arg *arg)
{
struct ath12k_wmi_vdev_spectral_conf_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(arg->vdev_id);
cmd->scan_count = cpu_to_le32(arg->scan_count);
cmd->scan_period = cpu_to_le32(arg->scan_period);
cmd->scan_priority = cpu_to_le32(arg->scan_priority);
cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size);
cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena);
cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena);
cmd->scan_noise_floor_ref = cpu_to_le32(arg->scan_noise_floor_ref);
cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay);
cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr);
cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr);
cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode);
cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode);
cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr);
cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format);
cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode);
cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale);
cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj);
cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI spectral scan config cmd vdev_id 0x%x\n",
arg->vdev_id);
ret = ath12k_wmi_cmd_send(ar->wmi, skb,
WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to send spectral scan config wmi cmd\n");
goto err;
}
return 0;
err:
dev_kfree_skb(skb);
return ret;
}
int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
u32 trigger, u32 enable)
{
struct ath12k_wmi_vdev_spectral_enable_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->trigger_cmd = cpu_to_le32(trigger);
cmd->enable_cmd = cpu_to_le32(enable);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI spectral enable cmd vdev id 0x%x\n",
vdev_id);
ret = ath12k_wmi_cmd_send(ar->wmi, skb,
WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to send spectral enable wmi cmd\n");
goto err;
}
return 0;
err:
dev_kfree_skb(skb);
return ret;
}
int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg)
{
struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
struct sk_buff *skb;
int ret;
skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
sizeof(*cmd));
cmd->pdev_id = cpu_to_le32(DP_SW2HW_MACID(arg->pdev_id));
cmd->module_id = cpu_to_le32(arg->module_id);
cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo);
cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi);
cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo);
cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi);
cmd->num_elems = cpu_to_le32(arg->num_elems);
cmd->buf_size = cpu_to_le32(arg->buf_size);
cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event);
cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI DMA ring cfg req cmd pdev_id 0x%x\n",
arg->pdev_id);
ret = ath12k_wmi_cmd_send(ar->wmi, skb,
WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
if (ret) {
ath12k_warn(ar->ab,
"failed to send dma ring cfg req wmi cmd\n");
goto err;
}
return 0;
err:
dev_kfree_skb(skb);
return ret;
}
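/* ath12k_wmi_tlv_iter() walks a TLV stream and invokes a parse callback
 * per element. The callbacks below validate the expected tag and bound
 * the entry counts against the fixed header, returning -EPROTO or
 * -ENOBUFS on a mismatch.
 */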
static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct ath12k_wmi_dma_buf_release_arg *arg = data;
if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
return -EPROTO;
if (arg->num_buf_entry >= le32_to_cpu(arg->fixed.num_buf_release_entry))
return -ENOBUFS;
arg->num_buf_entry++;
return 0;
}
static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct ath12k_wmi_dma_buf_release_arg *arg = data;
if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
return -EPROTO;
if (arg->num_meta >= le32_to_cpu(arg->fixed.num_meta_data_entry))
return -ENOBUFS;
arg->num_meta++;
return 0;
}
static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct ath12k_wmi_dma_buf_release_arg *arg = data;
const struct ath12k_wmi_dma_buf_release_fixed_params *fixed;
u32 pdev_id;
int ret;
switch (tag) {
case WMI_TAG_DMA_BUF_RELEASE:
fixed = ptr;
arg->fixed = *fixed;
pdev_id = DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id));
arg->fixed.pdev_id = cpu_to_le32(pdev_id);
break;
case WMI_TAG_ARRAY_STRUCT:
if (!arg->buf_entry_done) {
arg->num_buf_entry = 0;
arg->buf_entry = ptr;
ret = ath12k_wmi_tlv_iter(ab, ptr, len,
ath12k_wmi_dma_buf_entry_parse,
arg);
if (ret) {
ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n",
ret);
return ret;
}
arg->buf_entry_done = true;
} else if (!arg->meta_data_done) {
arg->num_meta = 0;
arg->meta_data = ptr;
ret = ath12k_wmi_tlv_iter(ab, ptr, len,
ath12k_wmi_dma_buf_meta_parse,
arg);
if (ret) {
ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n",
ret);
return ret;
}
arg->meta_data_done = true;
}
break;
default:
break;
}
return 0;
}
static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
struct ath12k_wmi_dma_buf_release_arg arg = {};
struct ath12k_dbring_buf_release_event param;
int ret;
ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
ath12k_wmi_dma_buf_parse,
&arg);
if (ret) {
ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
return;
}
param.fixed = arg.fixed;
param.buf_entry = arg.buf_entry;
param.num_buf_entry = arg.num_buf_entry;
param.meta_data = arg.meta_data;
param.num_meta = arg.num_meta;
ret = ath12k_dbring_buffer_release_event(ab, &param);
if (ret) {
ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
return;
}
}
static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
u32 phy_map = 0;
if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
return -EPROTO;
if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes)
return -ENOBUFS;
hw_mode_cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params,
hw_mode_id);
svc_rdy_ext->n_hw_mode_caps++;
phy_map = le32_to_cpu(hw_mode_cap->phy_id_map);
svc_rdy_ext->tot_phy_id += fls(phy_map);
return 0;
}
static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
u16 len, const void *ptr, void *data)
{
struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
enum wmi_host_hw_mode_config_type mode, pref;
u32 i;
int ret;
svc_rdy_ext->n_hw_mode_caps = 0;
svc_rdy_ext->hw_mode_caps = ptr;
ret = ath12k_wmi_tlv_iter(soc, ptr, len,
ath12k_wmi_hw_mode_caps_parse,
svc_rdy_ext);
if (ret) {
ath12k_warn(soc, "failed to parse tlv %d\n", ret);
return ret;
}
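/* Pick the preferred hardware mode: a lower ath12k_hw_mode_pri_map value
 * means a higher priority.
 */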
for (i = 0; i < svc_rdy_ext->n_hw_mode_caps; i++) {
hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
mode = le32_to_cpu(hw_mode_caps->hw_mode_id);
if (mode >= WMI_HOST_HW_MODE_MAX)
continue;
pref = soc->wmi_ab.preferred_hw_mode;
if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
soc->wmi_ab.preferred_hw_mode = mode;
}
}
ath12k_dbg(soc, ATH12K_DBG_WMI, "preferred_hw_mode:%d\n",
soc->wmi_ab.preferred_hw_mode);
if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
return -EINVAL;
return 0;
}
static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
return -EPROTO;
if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
return -ENOBUFS;
len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params));
if (!svc_rdy_ext->n_mac_phy_caps) {
svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len,
GFP_ATOMIC);
if (!svc_rdy_ext->mac_phy_caps)
return -ENOMEM;
}
memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
svc_rdy_ext->n_mac_phy_caps++;
return 0;
}
static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
return -EPROTO;
if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->arg.num_phy)
return -ENOBUFS;
svc_rdy_ext->n_ext_hal_reg_caps++;
return 0;
}
static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
u16 len, const void *ptr, void *data)
{
struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap;
int ret;
u32 i;
svc_rdy_ext->n_ext_hal_reg_caps = 0;
svc_rdy_ext->ext_hal_reg_caps = ptr;
ret = ath12k_wmi_tlv_iter(soc, ptr, len,
ath12k_wmi_ext_hal_reg_caps_parse,
svc_rdy_ext);
if (ret) {
ath12k_warn(soc, "failed to parse tlv %d\n", ret);
return ret;
}
for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
svc_rdy_ext->soc_hal_reg_caps,
svc_rdy_ext->ext_hal_reg_caps, i,
&reg_cap);
if (ret) {
ath12k_warn(soc, "failed to extract reg cap %d\n", i);
return ret;
}
soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
}
return 0;
}
static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
u16 len, const void *ptr,
void *data)
{
struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
u32 phy_id_map;
int pdev_index = 0;
int ret;
svc_rdy_ext->soc_hal_reg_caps = ptr;
svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);
soc->num_radios = 0;
phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);
soc->fw_pdev_count = 0;
while (phy_id_map && soc->num_radios < MAX_RADIOS) {
ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
svc_rdy_ext,
hw_mode_id, soc->num_radios,
&soc->pdevs[pdev_index]);
if (ret) {
ath12k_warn(soc, "failed to extract mac caps, idx :%d\n",
soc->num_radios);
return ret;
}
soc->num_radios++;
/* For single_pdev_only targets, save the mac_phy
 * capability in the same pdev.
 */
if (soc->hw_params->single_pdev_only)
pdev_index = 0;
else
pdev_index = soc->num_radios;
/* TODO: mac_phy_cap prints */
phy_id_map >>= 1;
}
if (soc->hw_params->single_pdev_only) {
soc->num_radios = 1;
soc->pdevs[0].pdev_id = 0;
}
return 0;
}
static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct ath12k_wmi_dma_ring_caps_parse *parse = data;
if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
return -EPROTO;
parse->n_dma_ring_caps++;
return 0;
}
static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
u32 num_cap)
{
size_t sz;
void *ptr;
sz = num_cap * sizeof(struct ath12k_dbring_cap);
ptr = kzalloc(sz, GFP_ATOMIC);
if (!ptr)
return -ENOMEM;
ab->db_caps = ptr;
ab->num_db_cap = num_cap;
return 0;
}
static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
{
kfree(ab->db_caps);
ab->db_caps = NULL;
}
static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
u16 len, const void *ptr, void *data)
{
struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data;
struct ath12k_wmi_dma_ring_caps_params *dma_caps;
struct ath12k_dbring_cap *dir_buff_caps;
int ret;
u32 i;
dma_caps_parse->n_dma_ring_caps = 0;
dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr;
ret = ath12k_wmi_tlv_iter(ab, ptr, len,
ath12k_wmi_dma_ring_caps_parse,
dma_caps_parse);
if (ret) {
ath12k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
return ret;
}
if (!dma_caps_parse->n_dma_ring_caps)
return 0;
if (ab->num_db_cap) {
ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n");
return 0;
}
ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
if (ret)
return ret;
dir_buff_caps = ab->db_caps;
for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) {
ath12k_warn(ab, "Invalid module id %d\n",
le32_to_cpu(dma_caps[i].module_id));
ret = -EINVAL;
goto free_dir_buff;
}
dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id);
dir_buff_caps[i].pdev_id =
DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id));
dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem);
dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz);
dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align);
}
return 0;
free_dir_buff:
ath12k_wmi_free_dbring_caps(ab);
return ret;
}
static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
int ret;
switch (tag) {
case WMI_TAG_SERVICE_READY_EXT_EVENT:
ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr,
&svc_rdy_ext->arg);
if (ret) {
ath12k_warn(ab, "unable to extract ext params\n");
return ret;
}
break;
case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
svc_rdy_ext->hw_caps = ptr;
svc_rdy_ext->arg.num_hw_modes =
le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes);
break;
case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr,
svc_rdy_ext);
if (ret)
return ret;
break;
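/* The service-ready-ext event carries several WMI_TAG_ARRAY_STRUCT TLVs
 * in a fixed order; the *_done flags track which array is expected next.
 */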
case WMI_TAG_ARRAY_STRUCT:
if (!svc_rdy_ext->hw_mode_done) {
ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext);
if (ret)
return ret;
svc_rdy_ext->hw_mode_done = true;
} else if (!svc_rdy_ext->mac_phy_done) {
svc_rdy_ext->n_mac_phy_caps = 0;
ret = ath12k_wmi_tlv_iter(ab, ptr, len,
ath12k_wmi_mac_phy_caps_parse,
svc_rdy_ext);
if (ret) {
ath12k_warn(ab, "failed to parse tlv %d\n", ret);
return ret;
}
svc_rdy_ext->mac_phy_done = true;
} else if (!svc_rdy_ext->ext_hal_reg_done) {
ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
if (ret)
return ret;
svc_rdy_ext->ext_hal_reg_done = true;
} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
svc_rdy_ext->mac_phy_chainmask_combo_done = true;
} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
svc_rdy_ext->mac_phy_chainmask_cap_done = true;
} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
svc_rdy_ext->oem_dma_ring_cap_done = true;
} else if (!svc_rdy_ext->dma_ring_cap_done) {
ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
&svc_rdy_ext->dma_caps_parse);
if (ret)
return ret;
svc_rdy_ext->dma_ring_cap_done = true;
}
break;
default:
break;
}
return 0;
}
static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { };
int ret;
ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
ath12k_wmi_svc_rdy_ext_parse,
&svc_rdy_ext);
if (ret) {
ath12k_warn(ab, "failed to parse tlv %d\n", ret);
goto err;
}
if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
complete(&ab->wmi_ab.service_ready);
kfree(svc_rdy_ext.mac_phy_caps);
return 0;
err:
kfree(svc_rdy_ext.mac_phy_caps);
ath12k_wmi_free_dbring_caps(ab);
return ret;
}
static int ath12k_pull_svc_ready_ext2(struct ath12k_wmi_pdev *wmi_handle,
const void *ptr,
struct ath12k_wmi_svc_rdy_ext2_arg *arg)
{
const struct wmi_service_ready_ext2_event *ev = ptr;
if (!ev)
return -EINVAL;
arg->reg_db_version = le32_to_cpu(ev->reg_db_version);
arg->hw_min_max_tx_power_2ghz = le32_to_cpu(ev->hw_min_max_tx_power_2ghz);
arg->hw_min_max_tx_power_5ghz = le32_to_cpu(ev->hw_min_max_tx_power_5ghz);
arg->chwidth_num_peer_caps = le32_to_cpu(ev->chwidth_num_peer_caps);
arg->preamble_puncture_bw = le32_to_cpu(ev->preamble_puncture_bw);
arg->max_user_per_ppdu_ofdma = le32_to_cpu(ev->max_user_per_ppdu_ofdma);
arg->max_user_per_ppdu_mumimo = le32_to_cpu(ev->max_user_per_ppdu_mumimo);
arg->target_cap_flags = le32_to_cpu(ev->target_cap_flags);
return 0;
}
static void ath12k_wmi_eht_caps_parse(struct ath12k_pdev *pdev, u32 band,
const __le32 cap_mac_info[],
const __le32 cap_phy_info[],
const __le32 supp_mcs[],
const struct ath12k_wmi_ppe_threshold_params *ppet,
__le32 cap_info_internal)
{
struct ath12k_band_cap *cap_band = &pdev->cap.band[band];
u8 i;
for (i = 0; i < WMI_MAX_EHTCAP_MAC_SIZE; i++)
cap_band->eht_cap_mac_info[i] = le32_to_cpu(cap_mac_info[i]);
for (i = 0; i < WMI_MAX_EHTCAP_PHY_SIZE; i++)
cap_band->eht_cap_phy_info[i] = le32_to_cpu(cap_phy_info[i]);
cap_band->eht_mcs_20_only = le32_to_cpu(supp_mcs[0]);
cap_band->eht_mcs_80 = le32_to_cpu(supp_mcs[1]);
if (band != NL80211_BAND_2GHZ) {
cap_band->eht_mcs_160 = le32_to_cpu(supp_mcs[2]);
cap_band->eht_mcs_320 = le32_to_cpu(supp_mcs[3]);
}
cap_band->eht_ppet.numss_m1 = le32_to_cpu(ppet->numss_m1);
cap_band->eht_ppet.ru_bit_mask = le32_to_cpu(ppet->ru_info);
for (i = 0; i < WMI_MAX_NUM_SS; i++)
cap_band->eht_ppet.ppet16_ppet8_ru3_ru0[i] =
le32_to_cpu(ppet->ppet16_ppet8_ru3_ru0[i]);
cap_band->eht_cap_info_internal = le32_to_cpu(cap_info_internal);
}
static int
ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
const struct ath12k_wmi_caps_ext_params *caps,
struct ath12k_pdev *pdev)
{
u32 bands;
int i;
if (ab->hw_params->single_pdev_only) {
for (i = 0; i < ab->fw_pdev_count; i++) {
struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];
if (fw_pdev->pdev_id == le32_to_cpu(caps->pdev_id) &&
fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
bands = fw_pdev->supported_bands;
break;
}
}
if (i == ab->fw_pdev_count)
return -EINVAL;
} else {
bands = pdev->cap.supported_bands;
}
if (bands & WMI_HOST_WLAN_2G_CAP) {
ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_2GHZ,
caps->eht_cap_mac_info_2ghz,
caps->eht_cap_phy_info_2ghz,
caps->eht_supp_mcs_ext_2ghz,
&caps->eht_ppet_2ghz,
caps->eht_cap_info_internal);
}
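/* The firmware reports a single 5 GHz capability set; reuse it for the
 * 6 GHz band as well.
 */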
if (bands & WMI_HOST_WLAN_5G_CAP) {
ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_5GHZ,
caps->eht_cap_mac_info_5ghz,
caps->eht_cap_phy_info_5ghz,
caps->eht_supp_mcs_ext_5ghz,
&caps->eht_ppet_5ghz,
caps->eht_cap_info_internal);
ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_6GHZ,
caps->eht_cap_mac_info_5ghz,
caps->eht_cap_phy_info_5ghz,
caps->eht_supp_mcs_ext_5ghz,
&caps->eht_ppet_5ghz,
caps->eht_cap_info_internal);
}
return 0;
}
static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
u16 len, const void *ptr,
void *data)
{
const struct ath12k_wmi_caps_ext_params *caps = ptr;
int i = 0, ret;
if (tag != WMI_TAG_MAC_PHY_CAPABILITIES_EXT)
return -EPROTO;
if (ab->hw_params->single_pdev_only) {
if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id))
return 0;
} else {
for (i = 0; i < ab->num_radios; i++) {
if (ab->pdevs[i].pdev_id == le32_to_cpu(caps->pdev_id))
break;
}
if (i == ab->num_radios)
return -EINVAL;
}
ret = ath12k_wmi_tlv_mac_phy_caps_ext_parse(ab, caps, &ab->pdevs[i]);
if (ret) {
ath12k_warn(ab,
"failed to parse extended MAC PHY capabilities for pdev %d: %d\n",
ab->pdevs[i].pdev_id, ret);
return ret;
}
return 0;
}
static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
struct ath12k_wmi_svc_rdy_ext2_parse *parse = data;
int ret;
switch (tag) {
case WMI_TAG_SERVICE_READY_EXT2_EVENT:
ret = ath12k_pull_svc_ready_ext2(wmi_handle, ptr,
&parse->arg);
if (ret) {
ath12k_warn(ab,
"failed to extract wmi service ready ext2 parameters: %d\n",
ret);
return ret;
}
break;
case WMI_TAG_ARRAY_STRUCT:
if (!parse->dma_ring_cap_done) {
ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
&parse->dma_caps_parse);
if (ret)
return ret;
parse->dma_ring_cap_done = true;
} else if (!parse->spectral_bin_scaling_done) {
/* TODO: place-holder; the spectral bin scaling TLV precedes
 * WMI_TAG_MAC_PHY_CAPABILITIES_EXT in this event but is not
 * parsed yet.
 */
parse->spectral_bin_scaling_done = true;
} else if (!parse->mac_phy_caps_ext_done) {
ret = ath12k_wmi_tlv_iter(ab, ptr, len,
ath12k_wmi_tlv_mac_phy_caps_ext,
parse);
if (ret) {
ath12k_warn(ab, "failed to parse extended MAC PHY capabilities WMI TLV: %d\n",
ret);
return ret;
}
parse->mac_phy_caps_ext_done = true;
}
break;
default:
break;
}
return 0;
}
static int ath12k_service_ready_ext2_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { };
int ret;
ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
ath12k_wmi_svc_rdy_ext2_parse,
&svc_rdy_ext2);
if (ret) {
ath12k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
goto err;
}
complete(&ab->wmi_ab.service_ready);
return 0;
err:
ath12k_wmi_free_dbring_caps(ab);
return ret;
}
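/* The pullers below use ath12k_wmi_tlv_parse_alloc(), which returns a
 * tag-indexed table of TLV pointers (allocated with GFP_ATOMIC since
 * events may arrive in atomic context). Each puller looks up its fixed
 * parameter tag, copies the fields out and frees the table.
 */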
static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb,
struct wmi_vdev_start_resp_event *vdev_rsp)
{
const void **tb;
const struct wmi_vdev_start_resp_event *ev;
int ret;
tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
if (!ev) {
ath12k_warn(ab, "failed to fetch vdev start resp ev");
kfree(tb);
return -EPROTO;
}
*vdev_rsp = *ev;
kfree(tb);
return 0;
}
static struct ath12k_reg_rule
*create_ext_reg_rules_from_wmi(u32 num_reg_rules,
struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule)
{
struct ath12k_reg_rule *reg_rule_ptr;
u32 count;
reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)),
GFP_ATOMIC);
if (!reg_rule_ptr)
return NULL;
for (count = 0; count < num_reg_rules; count++) {
reg_rule_ptr[count].start_freq =
le32_get_bits(wmi_reg_rule[count].freq_info,
REG_RULE_START_FREQ);
reg_rule_ptr[count].end_freq =
le32_get_bits(wmi_reg_rule[count].freq_info,
REG_RULE_END_FREQ);
reg_rule_ptr[count].max_bw =
le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
REG_RULE_MAX_BW);
reg_rule_ptr[count].reg_power =
le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
REG_RULE_REG_PWR);
reg_rule_ptr[count].ant_gain =
le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
REG_RULE_ANT_GAIN);
reg_rule_ptr[count].flags =
le32_get_bits(wmi_reg_rule[count].flag_info,
REG_RULE_FLAGS);
reg_rule_ptr[count].psd_flag =
le32_get_bits(wmi_reg_rule[count].psd_power_info,
REG_RULE_PSD_INFO);
reg_rule_ptr[count].psd_eirp =
le32_get_bits(wmi_reg_rule[count].psd_power_info,
REG_RULE_PSD_EIRP);
}
return reg_rule_ptr;
}
static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
struct sk_buff *skb,
struct ath12k_reg_info *reg_info)
{
const void **tb;
const struct wmi_reg_chan_list_cc_ext_event *ev;
struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule;
u32 num_2g_reg_rules, num_5g_reg_rules;
u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
u32 total_reg_rules = 0;
int ret, i, j;
ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");
tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT];
if (!ev) {
ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n");
kfree(tb);
return -EPROTO;
}
reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules);
reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules);
reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] =
le32_to_cpu(ev->num_6g_reg_rules_ap_lpi);
reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] =
le32_to_cpu(ev->num_6g_reg_rules_ap_sp);
reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] =
le32_to_cpu(ev->num_6g_reg_rules_ap_vlp);
for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]);
reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]);
reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]);
}
num_2g_reg_rules = reg_info->num_2g_reg_rules;
total_reg_rules += num_2g_reg_rules;
num_5g_reg_rules = reg_info->num_5g_reg_rules;
total_reg_rules += num_5g_reg_rules;
if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) {
ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n",
num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES);
kfree(tb);
return -EINVAL;
}
for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i];
if (num_6g_reg_rules_ap[i] > MAX_6G_REG_RULES) {
ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n",
i, num_6g_reg_rules_ap[i], MAX_6G_REG_RULES);
kfree(tb);
return -EINVAL;
}
total_reg_rules += num_6g_reg_rules_ap[i];
}
for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6G_REG_RULES ||
num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6G_REG_RULES ||
num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6G_REG_RULES) {
ath12k_warn(ab, "Num 6g client reg rules exceeds max limit, for client(type: %d)\n",
i);
kfree(tb);
return -EINVAL;
}
}
if (!total_reg_rules) {
ath12k_warn(ab, "No reg rules available\n");
kfree(tb);
return -EINVAL;
}
memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
/* FIXME: Currently the firmware includes the 6 GHz reg rules in the
 * 5 GHz rule list for country US as well. Having the same 6 GHz rule
 * in both the 5 GHz and 6 GHz lists makes the intersect check true,
 * so the same rules show up multiple times in the iw output. The
 * workaround below avoids parsing the 6 GHz rules from the 5 GHz rule
 * list; it can be removed once the firmware stops reporting 6 GHz
 * rules in the 5 GHz list.
 */
if (memcmp(reg_info->alpha2, "US", 2) == 0) {
reg_info->num_5g_reg_rules = REG_US_5G_NUM_REG_RULES;
num_5g_reg_rules = reg_info->num_5g_reg_rules;
}
reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
reg_info->num_phy = le32_to_cpu(ev->num_phy);
reg_info->phy_id = le32_to_cpu(ev->phy_id);
reg_info->ctry_code = le32_to_cpu(ev->country_id);
reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code);
switch (le32_to_cpu(ev->status_code)) {
case WMI_REG_SET_CC_STATUS_PASS:
reg_info->status_code = REG_SET_CC_STATUS_PASS;
break;
case WMI_REG_CURRENT_ALPHA2_NOT_FOUND:
reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
break;
case WMI_REG_INIT_ALPHA2_NOT_FOUND:
reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
break;
case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED:
reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
break;
case WMI_REG_SET_CC_STATUS_NO_MEMORY:
reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
break;
case WMI_REG_SET_CC_STATUS_FAIL:
reg_info->status_code = REG_SET_CC_STATUS_FAIL;
break;
}
reg_info->is_ext_reg_event = true;
reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g);
reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g);
reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g);
reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g);
reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi);
reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi);
reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp);
reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp);
reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp);
reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp);
for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] =
le32_to_cpu(ev->min_bw_6g_client_lpi[i]);
reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] =
le32_to_cpu(ev->max_bw_6g_client_lpi[i]);
reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
le32_to_cpu(ev->min_bw_6g_client_sp[i]);
reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
le32_to_cpu(ev->max_bw_6g_client_sp[i]);
reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] =
le32_to_cpu(ev->min_bw_6g_client_vlp[i]);
reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] =
le32_to_cpu(ev->max_bw_6g_client_vlp[i]);
}
ath12k_dbg(ab, ATH12K_DBG_WMI,
"%s:cc_ext %s dsf %d BW: min_2g %d max_2g %d min_5g %d max_5g %d",
__func__, reg_info->alpha2, reg_info->dfs_region,
reg_info->min_bw_2g, reg_info->max_bw_2g,
reg_info->min_bw_5g, reg_info->max_bw_5g);
ath12k_dbg(ab, ATH12K_DBG_WMI,
"num_2g_reg_rules %d num_5g_reg_rules %d",
num_2g_reg_rules, num_5g_reg_rules);
ath12k_dbg(ab, ATH12K_DBG_WMI,
"num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d",
num_6g_reg_rules_ap[WMI_REG_INDOOR_AP],
num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP],
num_6g_reg_rules_ap[WMI_REG_VLP_AP]);
ath12k_dbg(ab, ATH12K_DBG_WMI,
"6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT],
num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT],
num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]);
ath12k_dbg(ab, ATH12K_DBG_WMI,
"6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT],
num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT],
num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]);
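/* The reg rules follow the fixed event as one flat array, packed in
 * order: 2 GHz, 5 GHz, 6 GHz AP (per AP type), then 6 GHz client (per AP
 * type and client type). ext_wmi_reg_rule is advanced past each group as
 * it is consumed.
 */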
ext_wmi_reg_rule =
(struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev
+ sizeof(*ev)
+ sizeof(struct wmi_tlv));
if (num_2g_reg_rules) {
reg_info->reg_rules_2g_ptr =
create_ext_reg_rules_from_wmi(num_2g_reg_rules,
ext_wmi_reg_rule);
if (!reg_info->reg_rules_2g_ptr) {
kfree(tb);
ath12k_warn(ab, "Unable to Allocate memory for 2g rules\n");
return -ENOMEM;
}
}
if (num_5g_reg_rules) {
ext_wmi_reg_rule += num_2g_reg_rules;
reg_info->reg_rules_5g_ptr =
create_ext_reg_rules_from_wmi(num_5g_reg_rules,
ext_wmi_reg_rule);
if (!reg_info->reg_rules_5g_ptr) {
kfree(tb);
ath12k_warn(ab, "Unable to Allocate memory for 5g rules\n");
return -ENOMEM;
}
}
ext_wmi_reg_rule += num_5g_reg_rules;
for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
reg_info->reg_rules_6g_ap_ptr[i] =
create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i],
ext_wmi_reg_rule);
if (!reg_info->reg_rules_6g_ap_ptr[i]) {
kfree(tb);
ath12k_warn(ab, "Unable to Allocate memory for 6g ap rules\n");
return -ENOMEM;
}
ext_wmi_reg_rule += num_6g_reg_rules_ap[i];
}
for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
reg_info->reg_rules_6g_client_ptr[j][i] =
create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i],
ext_wmi_reg_rule);
if (!reg_info->reg_rules_6g_client_ptr[j][i]) {
kfree(tb);
ath12k_warn(ab, "Unable to Allocate memory for 6g client rules\n");
return -ENOMEM;
}
ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i];
}
}
reg_info->client_type = le32_to_cpu(ev->client_type);
reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
reg_info->unspecified_ap_usable = ev->unspecified_ap_usable;
reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] =
le32_to_cpu(ev->domain_code_6g_ap_lpi);
reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] =
le32_to_cpu(ev->domain_code_6g_ap_sp);
reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] =
le32_to_cpu(ev->domain_code_6g_ap_vlp);
for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] =
le32_to_cpu(ev->domain_code_6g_client_lpi[i]);
reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] =
le32_to_cpu(ev->domain_code_6g_client_sp[i]);
reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] =
le32_to_cpu(ev->domain_code_6g_client_vlp[i]);
}
reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id);
ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d",
reg_info->client_type, reg_info->domain_code_6g_super_id);
ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n");
kfree(tb);
return 0;
}
static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb,
struct wmi_peer_delete_resp_event *peer_del_resp)
{
const void **tb;
const struct wmi_peer_delete_resp_event *ev;
int ret;
tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
if (!ev) {
ath12k_warn(ab, "failed to fetch peer delete resp ev");
kfree(tb);
return -EPROTO;
}
memset(peer_del_resp, 0, sizeof(*peer_del_resp));
peer_del_resp->vdev_id = ev->vdev_id;
ether_addr_copy(peer_del_resp->peer_macaddr.addr,
ev->peer_macaddr.addr);
kfree(tb);
return 0;
}
static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
struct sk_buff *skb,
u32 *vdev_id)
{
const void **tb;
const struct wmi_vdev_delete_resp_event *ev;
int ret;
tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
if (!ev) {
ath12k_warn(ab, "failed to fetch vdev delete resp ev");
kfree(tb);
return -EPROTO;
}
*vdev_id = le32_to_cpu(ev->vdev_id);
kfree(tb);
return 0;
}
static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab, void *evt_buf,
u32 len, u32 *vdev_id,
u32 *tx_status)
{
const void **tb;
const struct wmi_bcn_tx_status_event *ev;
int ret;
tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
if (!ev) {
ath12k_warn(ab, "failed to fetch bcn tx status ev");
kfree(tb);
return -EPROTO;
}
*vdev_id = le32_to_cpu(ev->vdev_id);
*tx_status = le32_to_cpu(ev->tx_status);
kfree(tb);
return 0;
}
static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb,
u32 *vdev_id)
{
const void **tb;
const struct wmi_vdev_stopped_event *ev;
int ret;
tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
if (!ev) {
ath12k_warn(ab, "failed to fetch vdev stop ev");
kfree(tb);
return -EPROTO;
}
*vdev_id = le32_to_cpu(ev->vdev_id);
kfree(tb);
return 0;
}
static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
{
struct wmi_tlv_mgmt_rx_parse *parse = data;
switch (tag) {
case WMI_TAG_MGMT_RX_HDR:
parse->fixed = ptr;
break;
case WMI_TAG_ARRAY_BYTE:
if (!parse->frame_buf_done) {
parse->frame_buf = ptr;
parse->frame_buf_done = true;
}
break;
}
return 0;
}
static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab,
struct sk_buff *skb,
struct ath12k_wmi_mgmt_rx_arg *hdr)
{
struct wmi_tlv_mgmt_rx_parse parse = { };
const struct ath12k_wmi_mgmt_rx_params *ev;
const u8 *frame;
int i, ret;
ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
ath12k_wmi_tlv_mgmt_rx_parse,
&parse);
if (ret) {
ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret);
return ret;
}
ev = parse.fixed;
frame = parse.frame_buf;
if (!ev || !frame) {
ath12k_warn(ab, "failed to fetch mgmt rx hdr");
return -EPROTO;
}
hdr->pdev_id = le32_to_cpu(ev->pdev_id);
hdr->chan_freq = le32_to_cpu(ev->chan_freq);
hdr->channel = le32_to_cpu(ev->channel);
hdr->snr = le32_to_cpu(ev->snr);
hdr->rate = le32_to_cpu(ev->rate);
hdr->phy_mode = le32_to_cpu(ev->phy_mode);
hdr->buf_len = le32_to_cpu(ev->buf_len);
hdr->status = le32_to_cpu(ev->status);
hdr->flags = le32_to_cpu(ev->flags);
hdr->rssi = a_sle32_to_cpu(ev->rssi);
hdr->tsf_delta = le32_to_cpu(ev->tsf_delta);
for (i = 0; i < ATH_MAX_ANTENNA; i++)
hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]);
if (skb->len < (frame - skb->data) + hdr->buf_len) {
ath12k_warn(ab, "invalid length in mgmt rx hdr ev");
return -EPROTO;
}
/* shift the sk_buff to point to `frame` */
skb_trim(skb, 0);
skb_put(skb, frame - skb->data);
skb_pull(skb, frame - skb->data);
skb_put(skb, hdr->buf_len);
return 0;
}
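/* Complete a management frame TX: look up and remove the msdu from the
 * IDR under txmgmt_idr_lock, unmap its DMA buffer, report the TX status
 * to mac80211 and wake any waiter once no management TX remains pending.
 */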
static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
u32 status)
{
struct sk_buff *msdu;
struct ieee80211_tx_info *info;
struct ath12k_skb_cb *skb_cb;
int num_mgmt;
spin_lock_bh(&ar->txmgmt_idr_lock);
msdu = idr_find(&ar->txmgmt_idr, desc_id);
if (!msdu) {
ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
desc_id);
spin_unlock_bh(&ar->txmgmt_idr_lock);
return -ENOENT;
}
idr_remove(&ar->txmgmt_idr, desc_id);
spin_unlock_bh(&ar->txmgmt_idr_lock);
skb_cb = ATH12K_SKB_CB(msdu);
dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(msdu);
if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
info->flags |= IEEE80211_TX_STAT_ACK;
ieee80211_tx_status_irqsafe(ar->hw, msdu);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
/* WARN when we received this event without doing any mgmt tx */
WARN_ON_ONCE(num_mgmt < 0);
if (!num_mgmt)
wake_up(&ar->txmgmt_empty_waitq);
return 0;
}
static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
struct sk_buff *skb,
struct wmi_mgmt_tx_compl_event *param)
{
const void **tb;
const struct wmi_mgmt_tx_compl_event *ev;
int ret;
tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
if (!ev) {
ath12k_warn(ab, "failed to fetch mgmt tx compl ev");
kfree(tb);
return -EPROTO;
}
param->pdev_id = ev->pdev_id;
param->desc_id = ev->desc_id;
param->status = ev->status;
kfree(tb);
return 0;
}
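/* Scan state machine handlers. Each is called with ar->data_lock held
 * and only acts on the transitions it expects; any other state is logged
 * as invalid.
 */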
static void ath12k_wmi_event_scan_started(struct ath12k *ar)
{
lockdep_assert_held(&ar->data_lock);
switch (ar->scan.state) {
case ATH12K_SCAN_IDLE:
case ATH12K_SCAN_RUNNING:
case ATH12K_SCAN_ABORTING:
ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
ath12k_scan_state_str(ar->scan.state),
ar->scan.state);
break;
case ATH12K_SCAN_STARTING:
ar->scan.state = ATH12K_SCAN_RUNNING;
complete(&ar->scan.started);
break;
}
}
static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar)
{
lockdep_assert_held(&ar->data_lock);
switch (ar->scan.state) {
case ATH12K_SCAN_IDLE:
case ATH12K_SCAN_RUNNING:
case ATH12K_SCAN_ABORTING:
ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
ath12k_scan_state_str(ar->scan.state),
ar->scan.state);
break;
case ATH12K_SCAN_STARTING:
complete(&ar->scan.started);
__ath12k_mac_scan_finish(ar);
break;
}
}
static void ath12k_wmi_event_scan_completed(struct ath12k *ar)
{
lockdep_assert_held(&ar->data_lock);
switch (ar->scan.state) {
case ATH12K_SCAN_IDLE:
case ATH12K_SCAN_STARTING:
/* One suspected reason a scan can complete while starting is
 * firmware failing to deliver all scan events to the host, e.g.
 * when the transport pipe is full. This has been observed with
 * spectral scan phyerr events starving the WMI transport pipe.
 * In such a case the "scan completed" event should be (and is)
 * ignored by the host, as it may just be the firmware's scan
 * state machine recovering.
 */
ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
ath12k_scan_state_str(ar->scan.state),
ar->scan.state);
break;
case ATH12K_SCAN_RUNNING:
case ATH12K_SCAN_ABORTING:
__ath12k_mac_scan_finish(ar);
break;
}
}
static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
{
lockdep_assert_held(&ar->data_lock);
switch (ar->scan.state) {
case ATH12K_SCAN_IDLE:
case ATH12K_SCAN_STARTING:
ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
ath12k_scan_state_str(ar->scan.state),
ar->scan.state);
break;
case ATH12K_SCAN_RUNNING:
case ATH12K_SCAN_ABORTING:
ar->scan_channel = NULL;
break;
}
}
static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
{
lockdep_assert_held(&ar->data_lock);
switch (ar->scan.state) {
case ATH12K_SCAN_IDLE:
case ATH12K_SCAN_STARTING:
ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
ath12k_scan_state_str(ar->scan.state),
ar->scan.state);
break;
case ATH12K_SCAN_RUNNING:
case ATH12K_SCAN_ABORTING:
ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
break;
}
}
static const char *
ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
enum wmi_scan_completion_reason reason)
{
switch (type) {
case WMI_SCAN_EVENT_STARTED:
return "started";
case WMI_SCAN_EVENT_COMPLETED:
switch (reason) {
case WMI_SCAN_REASON_COMPLETED:
return "completed";
case WMI_SCAN_REASON_CANCELLED:
return "completed [cancelled]";
case WMI_SCAN_REASON_PREEMPTED:
return "completed [preempted]";
case WMI_SCAN_REASON_TIMEDOUT:
return "completed [timedout]";
case WMI_SCAN_REASON_INTERNAL_FAILURE:
return "completed [internal err]";
case WMI_SCAN_REASON_MAX:
break;
}
return "completed [unknown]";
case WMI_SCAN_EVENT_BSS_CHANNEL:
return "bss channel";
case WMI_SCAN_EVENT_FOREIGN_CHAN:
return "foreign channel";
case WMI_SCAN_EVENT_DEQUEUED:
return "dequeued";
case WMI_SCAN_EVENT_PREEMPTED:
return "preempted";
case WMI_SCAN_EVENT_START_FAILED:
return "start failed";
case WMI_SCAN_EVENT_RESTARTED:
return "restarted";
case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
return "foreign channel exit";
default:
return "unknown";
}
}
static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
struct wmi_scan_event *scan_evt_param)
{
const void **tb;
const struct wmi_scan_event *ev;
int ret;
tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_SCAN_EVENT];
if (!ev) {
ath12k_warn(ab, "failed to fetch scan ev");
kfree(tb);
return -EPROTO;
}
scan_evt_param->event_type = ev->event_type;
scan_evt_param->reason = ev->reason;
scan_evt_param->channel_freq = ev->channel_freq;
scan_evt_param->scan_req_id = ev->scan_req_id;
scan_evt_param->scan_id = ev->scan_id;
scan_evt_param->vdev_id = ev->vdev_id;
scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
kfree(tb);
return 0;
}
static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb,
struct wmi_peer_sta_kickout_arg *arg)
{
const void **tb;
const struct wmi_peer_sta_kickout_event *ev;
int ret;
tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
if (!ev) {
ath12k_warn(ab, "failed to fetch peer sta kickout ev");
kfree(tb);
return -EPROTO;
}
arg->mac_addr = ev->peer_macaddr.addr;
kfree(tb);
return 0;
}
static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
struct wmi_roam_event *roam_ev)
{
const void **tb;
const struct wmi_roam_event *ev;
int ret;
tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_ROAM_EVENT];
if (!ev) {
ath12k_warn(ab, "failed to fetch roam ev");
kfree(tb);
return -EPROTO;
}
roam_ev->vdev_id = ev->vdev_id;
roam_ev->reason = ev->reason;
roam_ev->rssi = ev->rssi;
kfree(tb);
return 0;
}
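/* Map a center frequency to a flat channel index across all supported
 * bands. Note that when the frequency is not found, the running index
 * (i.e. the total number of channels walked) is returned.
 */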
static int freq_to_idx(struct ath12k *ar, int freq)
{
struct ieee80211_supported_band *sband;
int band, ch, idx = 0;
for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
if (!ar->mac.sbands[band].channels)
continue;
sband = ar->hw->wiphy->bands[band];
if (!sband)
continue;
for (ch = 0; ch < sband->n_channels; ch++, idx++)
if (sband->channels[ch].center_freq == freq)
goto exit;
}
exit:
return idx;
}
static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, u8 *evt_buf,
u32 len, struct wmi_chan_info_event *ch_info_ev)
{
const void **tb;
const struct wmi_chan_info_event *ev;
int ret;
tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_CHAN_INFO_EVENT];
if (!ev) {
ath12k_warn(ab, "failed to fetch chan info ev");
kfree(tb);
return -EPROTO;
}
ch_info_ev->err_code = ev->err_code;
ch_info_ev->freq = ev->freq;
ch_info_ev->cmd_flags = ev->cmd_flags;
ch_info_ev->noise_floor = ev->noise_floor;
ch_info_ev->rx_clear_count = ev->rx_clear_count;
ch_info_ev->cycle_count = ev->cycle_count;
ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
ch_info_ev->rx_frame_count = ev->rx_frame_count;
ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
ch_info_ev->vdev_id = ev->vdev_id;
kfree(tb);
return 0;
}
static int
ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
{
const void **tb;
const struct wmi_pdev_bss_chan_info_event *ev;
int ret;
tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
if (!ev) {
ath12k_warn(ab, "failed to fetch pdev bss chan info ev");
kfree(tb);
return -EPROTO;
}
bss_ch_info_ev->pdev_id = ev->pdev_id;
bss_ch_info_ev->freq = ev->freq;
bss_ch_info_ev->noise_floor = ev->noise_floor;
bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
kfree(tb);
return 0;
}
static int
ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb,
struct wmi_vdev_install_key_complete_arg *arg)
{
const void **tb;
const struct wmi_vdev_install_key_compl_event *ev;
int ret;
tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
if (!ev) {
ath12k_warn(ab, "failed to fetch vdev install key compl ev");
kfree(tb);
return -EPROTO;
}
arg->vdev_id = le32_to_cpu(ev->vdev_id);
arg->macaddr = ev->peer_macaddr.addr;
arg->key_idx = le32_to_cpu(ev->key_idx);
arg->key_flags = le32_to_cpu(ev->key_flags);
arg->status = le32_to_cpu(ev->status);
kfree(tb);
return 0;
}
static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb,
struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
{
const void **tb;
const struct wmi_peer_assoc_conf_event *ev;
int ret;
tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
if (!ev) {
ath12k_warn(ab, "failed to fetch peer assoc conf ev");
kfree(tb);
return -EPROTO;
}
peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id);
peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
kfree(tb);
return 0;
}
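/* Note: 'ev' is a by-value pointer parameter, so the parsed event is not
 * visible to the caller; as written this helper only validates that a
 * pdev temperature TLV is present.
 */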
static int
ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, u8 *evt_buf,
u32 len, const struct wmi_pdev_temperature_event *ev)
{
const void **tb;
int ret;
tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
if (!ev) {
ath12k_warn(ab, "failed to fetch pdev temp ev");
kfree(tb);
return -EPROTO;
}
kfree(tb);
return 0;
}
static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
{
/* Try to send pending beacons first; they take priority. */
wake_up(&ab->wmi_ab.tx_credits_wq);
}
static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
struct sk_buff *skb)
{
dev_kfree_skb(skb);
}
static bool ath12k_reg_is_world_alpha(char *alpha)
{
return alpha[0] == '0' && alpha[1] == '0';
}
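/* Handle a regulatory channel list event: extract the ext reg info,
 * intersect it with the default regd when a specific country was
 * requested, then either queue a regd update (after mac registration) or
 * stash the result as the default regd to apply at registration time.
 */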
static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct ath12k_reg_info *reg_info = NULL;
struct ieee80211_regdomain *regd = NULL;
bool intersect = false;
int ret = 0, pdev_idx, i, j;
struct ath12k *ar;
reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
if (!reg_info) {
ret = -ENOMEM;
goto fallback;
}
ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
if (ret) {
ath12k_warn(ab, "failed to extract regulatory info from received event\n");
goto fallback;
}
if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
/* If setting the requested country fails, the firmware retains
 * the current regd. Log the failure and return.
 */
ath12k_warn(ab, "Failed to set the requested Country regulatory setting\n");
goto mem_free;
}
pdev_idx = reg_info->phy_id;
if (pdev_idx >= ab->num_radios) {
/* Process the event for phy0 only if single_pdev_only
* is true. If pdev_idx is valid but not 0, discard the
* event. Otherwise, it goes to fallback.
*/
if (ab->hw_params->single_pdev_only &&
pdev_idx < ab->hw_params->num_rxmda_per_pdev)
goto mem_free;
else
goto fallback;
}
/* Avoid multiple overwrites to default regd, during core
* stop-start after mac registration.
*/
if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
!memcmp(ab->default_regd[pdev_idx]->alpha2,
reg_info->alpha2, 2))
goto mem_free;
/* Intersect the new rules with the default regd if a new country
 * setting was requested, i.e. a default regd was already set during
 * initialization and the regd coming from this event has valid
 * country info.
 */
if (ab->default_regd[pdev_idx] &&
!ath12k_reg_is_world_alpha((char *)
ab->default_regd[pdev_idx]->alpha2) &&
!ath12k_reg_is_world_alpha((char *)reg_info->alpha2))
intersect = true;
regd = ath12k_reg_build_regd(ab, reg_info, intersect);
if (!regd) {
ath12k_warn(ab, "failed to build regd from reg_info\n");
goto fallback;
}
spin_lock(&ab->base_lock);
if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
/* Once the mac is registered, ar is valid and all CC events from
 * the firmware are currently considered to be triggered by user
 * requests. Free the previously built regd before assigning the
 * newly generated one to ar; kfree() handles a NULL pointer
 * itself.
 */
ar = ab->pdevs[pdev_idx].ar;
kfree(ab->new_regd[pdev_idx]);
ab->new_regd[pdev_idx] = regd;
queue_work(ab->workqueue, &ar->regd_update_work);
} else {
/* Multiple events for the same *ar are not expected, but we can
 * still clear any previously stored default_regd if we receive
 * this event for the same radio by mistake; kfree() handles a
 * NULL pointer itself.
 */
kfree(ab->default_regd[pdev_idx]);
/* This regd would be applied during mac registration */
ab->default_regd[pdev_idx] = regd;
}
ab->dfs_region = reg_info->dfs_region;
spin_unlock(&ab->base_lock);
goto mem_free;
fallback:
/* Fall back to the older regd (by sending the previous country
* setting again) if fw has succeeded and we failed to process here.
* The Regdomain should be uniform across driver and fw. Since the
* FW has processed the command and sent a success status, we expect
* this function to succeed as well. If it doesn't, CTRY needs to be
* reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
*/
/* TODO: This is rare, but still should also be handled */
WARN_ON(1);
mem_free:
if (reg_info) {
kfree(reg_info->reg_rules_2g_ptr);
kfree(reg_info->reg_rules_5g_ptr);
if (reg_info->is_ext_reg_event) {
for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
kfree(reg_info->reg_rules_6g_ap_ptr[i]);
for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
kfree(reg_info->reg_rules_6g_client_ptr[j][i]);
}
kfree(reg_info);
}
return ret;
}
static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
const void *ptr, void *data)
{
struct ath12k_wmi_rdy_parse *rdy_parse = data;
struct wmi_ready_event fixed_param;
struct ath12k_wmi_mac_addr_params *addr_list;
struct ath12k_pdev *pdev;
u32 num_mac_addr;
int i;
switch (tag) {
case WMI_TAG_READY_EVENT:
memset(&fixed_param, 0, sizeof(fixed_param));
memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
min_t(u16, sizeof(fixed_param), len));
ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status);
rdy_parse->num_extra_mac_addr =
le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr);
ether_addr_copy(ab->mac_addr,
fixed_param.ready_event_min.mac_addr.addr);
ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum);
ab->wmi_ready = true;
break;
case WMI_TAG_ARRAY_FIXED_STRUCT:
addr_list = (struct ath12k_wmi_mac_addr_params *)ptr;
num_mac_addr = rdy_parse->num_extra_mac_addr;
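/* Per-pdev MAC addresses are only useful when there is more than one
* radio and firmware provided at least one address per radio.
*/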
if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
break;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
}
ab->pdevs_macaddr_valid = true;
break;
default:
break;
}
return 0;
}
static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct ath12k_wmi_rdy_parse rdy_parse = { };
int ret;
ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
ath12k_wmi_rdy_parse, &rdy_parse);
if (ret) {
ath12k_warn(ab, "failed to parse tlv %d\n", ret);
return ret;
}
complete(&ab->wmi_ab.unified_ready);
return 0;
}
static void ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_peer_delete_resp_event peer_del_resp;
struct ath12k *ar;
if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
ath12k_warn(ab, "failed to extract peer delete resp");
return;
}
rcu_read_lock();
ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id));
if (!ar) {
ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d",
peer_del_resp.vdev_id);
rcu_read_unlock();
return;
}
complete(&ar->peer_delete_done);
rcu_read_unlock();
ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
}
static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
struct ath12k *ar;
u32 vdev_id = 0;
if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
ath12k_warn(ab, "failed to extract vdev delete resp");
return;
}
rcu_read_lock();
ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
if (!ar) {
ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
vdev_id);
rcu_read_unlock();
return;
}
complete(&ar->vdev_delete_done);
rcu_read_unlock();
ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n",
vdev_id);
}
static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status)
{
switch (vdev_resp_status) {
case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
return "invalid vdev id";
case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
return "not supported";
case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
return "dfs violation";
case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
return "invalid regdomain";
default:
return "unknown";
}
}
static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_vdev_start_resp_event vdev_start_resp;
struct ath12k *ar;
u32 status;
if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
ath12k_warn(ab, "failed to extract vdev start resp");
return;
}
rcu_read_lock();
ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id));
if (!ar) {
ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d",
vdev_start_resp.vdev_id);
rcu_read_unlock();
return;
}
ar->last_wmi_vdev_start_status = 0;
status = le32_to_cpu(vdev_start_resp.status);
if (WARN_ON_ONCE(status)) {
ath12k_warn(ab, "vdev start resp error status %d (%s)\n",
status, ath12k_wmi_vdev_resp_print(status));
ar->last_wmi_vdev_start_status = status;
}
complete(&ar->vdev_setup_done);
rcu_read_unlock();
ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d",
vdev_start_resp.vdev_id);
}
static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb)
{
u32 vdev_id, tx_status;
if (ath12k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
&vdev_id, &tx_status) != 0) {
ath12k_warn(ab, "failed to extract bcn tx status");
return;
}
}
static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct ath12k *ar;
u32 vdev_id = 0;
if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
ath12k_warn(ab, "failed to extract vdev stopped event");
return;
}
rcu_read_lock();
ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
if (!ar) {
ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d",
vdev_id);
rcu_read_unlock();
return;
}
complete(&ar->vdev_setup_done);
rcu_read_unlock();
ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
}
static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct ath12k_wmi_mgmt_rx_arg rx_ev = {0};
struct ath12k *ar;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr;
u16 fc;
struct ieee80211_supported_band *sband;
if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
ath12k_warn(ab, "failed to extract mgmt rx event");
dev_kfree_skb(skb);
return;
}
memset(status, 0, sizeof(*status));
ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n",
rx_ev.status);
rcu_read_lock();
ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
if (!ar) {
ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
rx_ev.pdev_id);
dev_kfree_skb(skb);
goto exit;
}
if ((test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) ||
(rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
WMI_RX_STATUS_ERR_CRC))) {
dev_kfree_skb(skb);
goto exit;
}
if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ) {
status->band = NL80211_BAND_6GHZ;
} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
status->band = NL80211_BAND_2GHZ;
} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) {
status->band = NL80211_BAND_5GHZ;
} else {
/* Shouldn't happen unless list of advertised channels to
* mac80211 has been changed.
*/
WARN_ON_ONCE(1);
dev_kfree_skb(skb);
goto exit;
}
if (rx_ev.phy_mode == MODE_11B &&
(status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
ath12k_dbg(ab, ATH12K_DBG_WMI,
"wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);
sband = &ar->mac.sbands[status->band];
status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
status->band);
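/* Firmware reports a relative SNR; adding the (negative) default
* noise floor converts it to the absolute signal level in dBm that
* mac80211 expects.
*/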
status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
hdr = (struct ieee80211_hdr *)skb->data;
fc = le16_to_cpu(hdr->frame_control);
/* Firmware is guaranteed to report all essential management frames via
* WMI while it can deliver some extra via HTT. Since there can be
* duplicates, split the reporting wrt monitor/sniffing.
*/
status->flag |= RX_FLAG_SKIP_MONITOR;
/* In case of PMF, FW delivers decrypted frames with Protected Bit set
* including group privacy action frames.
*/
if (ieee80211_has_protected(hdr->frame_control)) {
status->flag |= RX_FLAG_DECRYPTED;
if (!ieee80211_is_robust_mgmt_frame(skb)) {
status->flag |= RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED;
hdr->frame_control = __cpu_to_le16(fc &
~IEEE80211_FCTL_PROTECTED);
}
}
/* TODO: Pending handle beacon implementation
*if (ieee80211_is_beacon(hdr->frame_control))
* ath12k_mac_handle_beacon(ar, skb);
*/
ath12k_dbg(ab, ATH12K_DBG_MGMT,
"event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
skb, skb->len,
fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
ath12k_dbg(ab, ATH12K_DBG_MGMT,
"event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
status->freq, status->band, status->signal,
status->rate_idx);
ieee80211_rx_ni(ar->hw, skb);
exit:
rcu_read_unlock();
}
static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
struct ath12k *ar;
if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
ath12k_warn(ab, "failed to extract mgmt tx compl event");
return;
}
rcu_read_lock();
ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id));
if (!ar) {
ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
tx_compl_param.pdev_id);
goto exit;
}
wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id),
le32_to_cpu(tx_compl_param.status));
ath12k_dbg(ab, ATH12K_DBG_MGMT,
"mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
tx_compl_param.pdev_id, tx_compl_param.desc_id,
tx_compl_param.status);
exit:
rcu_read_unlock();
}
static struct ath12k *ath12k_get_ar_on_scan_abort(struct ath12k_base *ab,
u32 vdev_id)
{
int i;
struct ath12k_pdev *pdev;
struct ath12k *ar;
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
if (pdev && pdev->ar) {
ar = pdev->ar;
spin_lock_bh(&ar->data_lock);
if (ar->scan.state == ATH12K_SCAN_ABORTING &&
ar->scan.vdev_id == vdev_id) {
spin_unlock_bh(&ar->data_lock);
return ar;
}
spin_unlock_bh(&ar->data_lock);
}
}
return NULL;
}
static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct ath12k *ar;
struct wmi_scan_event scan_ev = {0};
if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
ath12k_warn(ab, "failed to extract scan event");
return;
}
rcu_read_lock();
/* In case the scan was cancelled, e.g. during interface teardown,
* the interface will not be found in active interfaces.
* Rather, in such scenarios, iterate over the active pdev's to
* search 'ar' if the corresponding 'ar' scan is ABORTING and the
* aborting scan's vdev id matches this event info.
*/
if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED &&
le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED)
ar = ath12k_get_ar_on_scan_abort(ab, le32_to_cpu(scan_ev.vdev_id));
else
ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id));
if (!ar) {
ath12k_warn(ab, "Received scan event for unknown vdev");
rcu_read_unlock();
return;
}
spin_lock_bh(&ar->data_lock);
ath12k_dbg(ab, ATH12K_DBG_WMI,
"scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type),
le32_to_cpu(scan_ev.reason)),
le32_to_cpu(scan_ev.event_type),
le32_to_cpu(scan_ev.reason),
le32_to_cpu(scan_ev.channel_freq),
le32_to_cpu(scan_ev.scan_req_id),
le32_to_cpu(scan_ev.scan_id),
le32_to_cpu(scan_ev.vdev_id),
ath12k_scan_state_str(ar->scan.state), ar->scan.state);
switch (le32_to_cpu(scan_ev.event_type)) {
case WMI_SCAN_EVENT_STARTED:
ath12k_wmi_event_scan_started(ar);
break;
case WMI_SCAN_EVENT_COMPLETED:
ath12k_wmi_event_scan_completed(ar);
break;
case WMI_SCAN_EVENT_BSS_CHANNEL:
ath12k_wmi_event_scan_bss_chan(ar);
break;
case WMI_SCAN_EVENT_FOREIGN_CHAN:
ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq));
break;
case WMI_SCAN_EVENT_START_FAILED:
ath12k_warn(ab, "received scan start failure event\n");
ath12k_wmi_event_scan_start_failed(ar);
break;
case WMI_SCAN_EVENT_DEQUEUED:
__ath12k_mac_scan_finish(ar);
break;
case WMI_SCAN_EVENT_PREEMPTED:
case WMI_SCAN_EVENT_RESTARTED:
case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
default:
break;
}
spin_unlock_bh(&ar->data_lock);
rcu_read_unlock();
}
static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_peer_sta_kickout_arg arg = {};
struct ieee80211_sta *sta;
struct ath12k_peer *peer;
struct ath12k *ar;
if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
ath12k_warn(ab, "failed to extract peer sta kickout event");
return;
}
rcu_read_lock();
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find_by_addr(ab, arg.mac_addr);
if (!peer) {
ath12k_warn(ab, "peer not found %pM\n",
arg.mac_addr);
goto exit;
}
ar = ath12k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
if (!ar) {
ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
peer->vdev_id);
goto exit;
}
sta = ieee80211_find_sta_by_ifaddr(ar->hw,
arg.mac_addr, NULL);
if (!sta) {
ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
arg.mac_addr);
goto exit;
}
ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM",
arg.mac_addr);
ieee80211_report_low_ack(sta, 10);
exit:
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
}
static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_roam_event roam_ev = {};
struct ath12k *ar;
if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
ath12k_warn(ab, "failed to extract roam event");
return;
}
ath12k_dbg(ab, ATH12K_DBG_WMI,
"wmi roam event vdev %u reason 0x%08x rssi %d\n",
roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);
rcu_read_lock();
ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(roam_ev.vdev_id));
if (!ar) {
ath12k_warn(ab, "invalid vdev id in roam ev %d",
roam_ev.vdev_id);
rcu_read_unlock();
return;
}
if (le32_to_cpu(roam_ev.reason) >= WMI_ROAM_REASON_MAX)
ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
roam_ev.reason, roam_ev.vdev_id);
switch (le32_to_cpu(roam_ev.reason)) {
case WMI_ROAM_REASON_BEACON_MISS:
/* TODO: Pending beacon miss and connection_loss_work
* implementation
* ath12k_mac_handle_beacon_miss(ar, vdev_id);
*/
break;
case WMI_ROAM_REASON_BETTER_AP:
case WMI_ROAM_REASON_LOW_RSSI:
case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
case WMI_ROAM_REASON_HO_FAILED:
ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
roam_ev.reason, roam_ev.vdev_id);
break;
}
rcu_read_unlock();
}
static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_chan_info_event ch_info_ev = {0};
struct ath12k *ar;
struct survey_info *survey;
int idx;
/* HW channel counters frequency value in hertz */
u32 cc_freq_hz = ab->cc_freq_hz;
if (ath12k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
ath12k_warn(ab, "failed to extract chan info event");
return;
}
ath12k_dbg(ab, ATH12K_DBG_WMI,
"chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
ch_info_ev.mac_clk_mhz);
if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) {
ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n");
return;
}
rcu_read_lock();
ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id));
if (!ar) {
ath12k_warn(ab, "invalid vdev id in chan info ev %d",
ch_info_ev.vdev_id);
rcu_read_unlock();
return;
}
spin_lock_bh(&ar->data_lock);
switch (ar->scan.state) {
case ATH12K_SCAN_IDLE:
case ATH12K_SCAN_STARTING:
ath12k_warn(ab, "received chan info event without a scan request, ignoring\n");
goto exit;
case ATH12K_SCAN_RUNNING:
case ATH12K_SCAN_ABORTING:
break;
}
idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq));
if (idx >= ARRAY_SIZE(ar->survey)) {
ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
ch_info_ev.freq, idx);
goto exit;
}
/* If FW provides the MAC clock frequency in MHz, override the
* initialized HW channel counters frequency value.
*/
if (ch_info_ev.mac_clk_mhz)
cc_freq_hz = (le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000);
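/* The cycle counters below tick at the channel counter clock, so
* dividing by that clock rate turns the raw counts into the survey
* times exported to cfg80211.
*/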
if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_START_RESP) {
survey = &ar->survey[idx];
memset(survey, 0, sizeof(*survey));
survey->noise = le32_to_cpu(ch_info_ev.noise_floor);
survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
SURVEY_INFO_TIME_BUSY;
survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz);
survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count),
cc_freq_hz);
}
exit:
spin_unlock_bh(&ar->data_lock);
rcu_read_unlock();
}
static void
ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
struct survey_info *survey;
struct ath12k *ar;
u32 cc_freq_hz = ab->cc_freq_hz;
u64 busy, total, tx, rx, rx_bss;
int idx;
if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
ath12k_warn(ab, "failed to extract pdev bss chan info event");
return;
}
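/* WMI splits each 64-bit cycle counter into low/high 32-bit words;
* recombine them before deriving the survey times below.
*/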
busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 |
le32_to_cpu(bss_ch_info_ev.rx_clear_count_low);
total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 |
le32_to_cpu(bss_ch_info_ev.cycle_count_low);
tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 |
le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low);
rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 |
le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low);
rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 |
le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low);
ath12k_dbg(ab, ATH12K_DBG_WMI,
"pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
bss_ch_info_ev.noise_floor, busy, total,
tx, rx, rx_bss);
rcu_read_lock();
ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id));
if (!ar) {
ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
bss_ch_info_ev.pdev_id);
rcu_read_unlock();
return;
}
spin_lock_bh(&ar->data_lock);
idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq));
if (idx >= ARRAY_SIZE(ar->survey)) {
ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
bss_ch_info_ev.freq, idx);
goto exit;
}
survey = &ar->survey[idx];
survey->noise = le32_to_cpu(bss_ch_info_ev.noise_floor);
survey->time = div_u64(total, cc_freq_hz);
survey->time_busy = div_u64(busy, cc_freq_hz);
survey->time_rx = div_u64(rx_bss, cc_freq_hz);
survey->time_tx = div_u64(tx, cc_freq_hz);
survey->filled |= (SURVEY_INFO_NOISE_DBM |
SURVEY_INFO_TIME |
SURVEY_INFO_TIME_BUSY |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_TX);
exit:
spin_unlock_bh(&ar->data_lock);
complete(&ar->bss_survey_done);
rcu_read_unlock();
}
static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
struct ath12k *ar;
if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
ath12k_warn(ab, "failed to extract install key compl event");
return;
}
ath12k_dbg(ab, ATH12K_DBG_WMI,
"vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
install_key_compl.key_idx, install_key_compl.key_flags,
install_key_compl.macaddr, install_key_compl.status);
rcu_read_lock();
ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
if (!ar) {
ath12k_warn(ab, "invalid vdev id in install key compl ev %d",
install_key_compl.vdev_id);
rcu_read_unlock();
return;
}
ar->install_key_status = 0;
if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
ath12k_warn(ab, "install key failed for %pM status %d\n",
install_key_compl.macaddr, install_key_compl.status);
ar->install_key_status = install_key_compl.status;
}
complete(&ar->install_key_done);
rcu_read_unlock();
}
static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
u16 tag, u16 len,
const void *ptr,
void *data)
{
const struct wmi_service_available_event *ev;
u32 *wmi_ext2_service_bitmap;
int i, j;
u16 expected_len;
expected_len = WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32);
if (len < expected_len) {
ath12k_warn(ab, "invalid length %d for the WMI services available tag 0x%x\n",
len, tag);
return -EINVAL;
}
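/* Each 32-bit word of a segment bitmap carries 32 service flags. 'j'
* tracks the global service id (ext services start at WMI_MAX_SERVICE,
* ext2 services at WMI_MAX_EXT_SERVICE) while the inner do/while
* consumes one full word per iteration of 'i'.
*/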
switch (tag) {
case WMI_TAG_SERVICE_AVAILABLE_EVENT:
ev = (struct wmi_service_available_event *)ptr;
for (i = 0, j = WMI_MAX_SERVICE;
i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
i++) {
do {
if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) &
BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
set_bit(j, ab->wmi_ab.svc_map);
} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
}
ath12k_dbg(ab, ATH12K_DBG_WMI,
"wmi_ext_service_bitmap 0x%x 0x%x 0x%x 0x%x",
ev->wmi_service_segment_bitmap[0],
ev->wmi_service_segment_bitmap[1],
ev->wmi_service_segment_bitmap[2],
ev->wmi_service_segment_bitmap[3]);
break;
case WMI_TAG_ARRAY_UINT32:
wmi_ext2_service_bitmap = (u32 *)ptr;
for (i = 0, j = WMI_MAX_EXT_SERVICE;
i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
i++) {
do {
if (wmi_ext2_service_bitmap[i] &
BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
set_bit(j, ab->wmi_ab.svc_map);
} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
}
ath12k_dbg(ab, ATH12K_DBG_WMI,
"wmi_ext2_service_bitmap 0x%04x 0x%04x 0x%04x 0x%04x",
wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
break;
}
return 0;
}
static int ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb)
{
int ret;
ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
ath12k_wmi_tlv_services_parser,
NULL);
return ret;
}
static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
struct ath12k *ar;
if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
ath12k_warn(ab, "failed to extract peer assoc conf event");
return;
}
ath12k_dbg(ab, ATH12K_DBG_WMI,
"peer assoc conf ev vdev id %d macaddr %pM\n",
peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
rcu_read_lock();
ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
if (!ar) {
ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
peer_assoc_conf.vdev_id);
rcu_read_unlock();
return;
}
complete(&ar->peer_assoc_done);
rcu_read_unlock();
}
static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
{
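/* TODO: stats parsing is not implemented yet; the event is consumed
* silently.
*/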
}
/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
* is not part of BDF CTL(Conformance test limits) table entries.
*/
static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_pdev_ctl_failsafe_chk_event *ev;
int ret;
tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
return;
}
ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
if (!ev) {
ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
kfree(tb);
return;
}
ath12k_dbg(ab, ATH12K_DBG_WMI,
"pdev ctl failsafe check ev status %d\n",
ev->ctl_failsafe_status);
/* If ctl_failsafe_status is set to 1, FW will max out the transmit
* power to 10 dBm; otherwise the CTL power entry in the BDF is used.
*/
if (ev->ctl_failsafe_status != 0)
ath12k_warn(ab, "pdev ctl failsafe failure status %d",
ev->ctl_failsafe_status);
kfree(tb);
}
static void
ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
const struct ath12k_wmi_pdev_csa_event *ev,
const u32 *vdev_ids)
{
int i;
struct ath12k_vif *arvif;
/* Finish CSA once the switch count reaches zero */
if (ev->current_switch_count)
return;
rcu_read_lock();
for (i = 0; i < le32_to_cpu(ev->num_vdevs); i++) {
arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
if (!arvif) {
ath12k_warn(ab, "Recvd csa status for unknown vdev %d",
vdev_ids[i]);
continue;
}
if (arvif->is_up && arvif->vif->bss_conf.csa_active)
ieee80211_csa_finish(arvif->vif);
}
rcu_read_unlock();
}
static void
ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
const void **tb;
const struct ath12k_wmi_pdev_csa_event *ev;
const u32 *vdev_ids;
int ret;
tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
return;
}
ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
if (!ev || !vdev_ids) {
ath12k_warn(ab, "failed to fetch pdev csa switch count ev");
kfree(tb);
return;
}
ath12k_dbg(ab, ATH12K_DBG_WMI,
"pdev csa switch count %d for pdev %d, num_vdevs %d",
ev->current_switch_count, ev->pdev_id,
ev->num_vdevs);
ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
kfree(tb);
}
static void
ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
{
const void **tb;
const struct ath12k_wmi_pdev_radar_event *ev;
struct ath12k *ar;
int ret;
tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
return;
}
ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
if (!ev) {
ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev");
kfree(tb);
return;
}
ath12k_dbg(ab, ATH12K_DBG_WMI,
"pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
ev->freq_offset, ev->sidx);
ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
if (!ar) {
ath12k_warn(ab, "radar detected in invalid pdev %d\n",
ev->pdev_id);
goto exit;
}
ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
ev->pdev_id);
if (ar->dfs_block_radar_events)
ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
else
ieee80211_radar_detected(ar->hw);
exit:
kfree(tb);
}
static void
ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
struct ath12k *ar;
struct wmi_pdev_temperature_event ev = {0};
if (ath12k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) {
ath12k_warn(ab, "failed to extract pdev temperature event");
return;
}
ath12k_dbg(ab, ATH12K_DBG_WMI,
"pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
if (!ar) {
ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
return;
}
}
static void ath12k_fils_discovery_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_fils_discovery_event *ev;
int ret;
tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab,
"failed to parse FILS discovery event tlv %d\n",
ret);
return;
}
ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
if (!ev) {
ath12k_warn(ab, "failed to fetch FILS discovery event\n");
kfree(tb);
return;
}
ath12k_warn(ab,
"FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
ev->vdev_id, ev->fils_tt, ev->tbtt);
kfree(tb);
}
static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_probe_resp_tx_status_event *ev;
int ret;
tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab,
"failed to parse probe response transmission status event tlv: %d\n",
ret);
return;
}
ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
if (!ev) {
ath12k_warn(ab,
"failed to fetch probe response transmission status event");
kfree(tb);
return;
}
if (ev->tx_status)
ath12k_warn(ab,
"Probe response transmission failed for vdev_id %u, status %u\n",
ev->vdev_id, ev->tx_status);
kfree(tb);
}
static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_tlv_event_id id;
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID);
if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
goto out;
switch (id) {
/* Process all the WMI events here */
case WMI_SERVICE_READY_EVENTID:
ath12k_service_ready_event(ab, skb);
break;
case WMI_SERVICE_READY_EXT_EVENTID:
ath12k_service_ready_ext_event(ab, skb);
break;
case WMI_SERVICE_READY_EXT2_EVENTID:
ath12k_service_ready_ext2_event(ab, skb);
break;
case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
ath12k_reg_chan_list_event(ab, skb);
break;
case WMI_READY_EVENTID:
ath12k_ready_event(ab, skb);
break;
case WMI_PEER_DELETE_RESP_EVENTID:
ath12k_peer_delete_resp_event(ab, skb);
break;
case WMI_VDEV_START_RESP_EVENTID:
ath12k_vdev_start_resp_event(ab, skb);
break;
case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
ath12k_bcn_tx_status_event(ab, skb);
break;
case WMI_VDEV_STOPPED_EVENTID:
ath12k_vdev_stopped_event(ab, skb);
break;
case WMI_MGMT_RX_EVENTID:
ath12k_mgmt_rx_event(ab, skb);
/* mgmt_rx_event() owns the skb now! */
return;
case WMI_MGMT_TX_COMPLETION_EVENTID:
ath12k_mgmt_tx_compl_event(ab, skb);
break;
case WMI_SCAN_EVENTID:
ath12k_scan_event(ab, skb);
break;
case WMI_PEER_STA_KICKOUT_EVENTID:
ath12k_peer_sta_kickout_event(ab, skb);
break;
case WMI_ROAM_EVENTID:
ath12k_roam_event(ab, skb);
break;
case WMI_CHAN_INFO_EVENTID:
ath12k_chan_info_event(ab, skb);
break;
case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
ath12k_pdev_bss_chan_info_event(ab, skb);
break;
case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
ath12k_vdev_install_key_compl_event(ab, skb);
break;
case WMI_SERVICE_AVAILABLE_EVENTID:
ath12k_service_available_event(ab, skb);
break;
case WMI_PEER_ASSOC_CONF_EVENTID:
ath12k_peer_assoc_conf_event(ab, skb);
break;
case WMI_UPDATE_STATS_EVENTID:
ath12k_update_stats_event(ab, skb);
break;
case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
ath12k_pdev_ctl_failsafe_check_event(ab, skb);
break;
case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb);
break;
case WMI_PDEV_TEMPERATURE_EVENTID:
ath12k_wmi_pdev_temperature_event(ab, skb);
break;
case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
break;
case WMI_HOST_FILS_DISCOVERY_EVENTID:
ath12k_fils_discovery_event(ab, skb);
break;
case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
ath12k_probe_resp_tx_status_event(ab, skb);
break;
/* add Unsupported events here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
case WMI_TWT_ENABLE_EVENTID:
case WMI_TWT_DISABLE_EVENTID:
case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
ath12k_dbg(ab, ATH12K_DBG_WMI,
"ignoring unsupported event 0x%x\n", id);
break;
case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
break;
case WMI_VDEV_DELETE_RESP_EVENTID:
ath12k_vdev_delete_resp_event(ab, skb);
break;
/* TODO: Add remaining events */
default:
ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
break;
}
out:
dev_kfree_skb(skb);
}
static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab,
u32 pdev_idx)
{
int status;
u32 svc_id[] = { ATH12K_HTC_SVC_ID_WMI_CONTROL,
ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
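/* svc_id[] is indexed by pdev: pdev 0 uses the primary WMI control
* endpoint, pdevs 1 and 2 the MAC1/MAC2 endpoints.
*/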
struct ath12k_htc_svc_conn_req conn_req = {};
struct ath12k_htc_svc_conn_resp conn_resp = {};
/* these fields are the same for all service endpoints */
conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx;
conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits;
/* connect to control service */
conn_req.service_id = svc_id[pdev_idx];
status = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
if (status) {
ath12k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
status);
return status;
}
ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
return 0;
}
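/* The unit test command is laid out as the fixed wmi_unit_test_cmd
* struct immediately followed by a TLV header and the u32 argument
* array: [unit_test_cmd][tlv hdr][arg0 ... argN-1].
*/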
static int
ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
struct wmi_unit_test_cmd ut_cmd,
u32 *test_args)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_unit_test_cmd *cmd;
struct sk_buff *skb;
struct wmi_tlv *tlv;
void *ptr;
u32 *ut_cmd_args;
int buf_len, arg_len;
int ret;
int i;
arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args);
buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_unit_test_cmd *)skb->data;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD,
sizeof(ut_cmd));
cmd->vdev_id = ut_cmd.vdev_id;
cmd->module_id = ut_cmd.module_id;
cmd->num_args = ut_cmd.num_args;
cmd->diag_token = ut_cmd.diag_token;
ptr = skb->data + sizeof(ut_cmd);
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
ptr += TLV_HDR_SIZE;
ut_cmd_args = ptr;
for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++)
ut_cmd_args[i] = test_args[i];
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI unit test : module %d vdev %d n_args %d token %d\n",
cmd->module_id, cmd->vdev_id, cmd->num_args,
cmd->diag_token);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
if (ret) {
ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
ret);
dev_kfree_skb(skb);
}
return ret;
}
int ath12k_wmi_simulate_radar(struct ath12k *ar)
{
struct ath12k_vif *arvif;
u32 dfs_args[DFS_MAX_TEST_ARGS];
struct wmi_unit_test_cmd wmi_ut;
bool arvif_found = false;
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
arvif_found = true;
break;
}
}
if (!arvif_found)
return -EINVAL;
dfs_args[DFS_TEST_CMDID] = 0;
dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
/* Currently we can pass segment_id (b0 - b1), chirp (b2) and
* freq offset (b3 - b10) to the unit test. For simulation
* purposes these can be set to 0, which is valid.
*/
dfs_args[DFS_TEST_RADAR_PARAM] = 0;
wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id);
wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE);
wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS);
wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN);
ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n");
return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
}
int ath12k_wmi_connect(struct ath12k_base *ab)
{
u32 i;
u8 wmi_ep_count;
wmi_ep_count = ab->htc.wmi_ep_count;
if (wmi_ep_count > ab->hw_params->max_radios)
return -1;
for (i = 0; i < wmi_ep_count; i++)
ath12k_connect_pdev_htc_service(ab, i);
return 0;
}
static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id)
{
if (WARN_ON(pdev_id >= MAX_RADIOS))
return;
/* TODO: Deinit any pdev specific wmi resource */
}
int ath12k_wmi_pdev_attach(struct ath12k_base *ab,
u8 pdev_id)
{
struct ath12k_wmi_pdev *wmi_handle;
if (pdev_id >= ab->hw_params->max_radios)
return -EINVAL;
wmi_handle = &ab->wmi_ab.wmi[pdev_id];
wmi_handle->wmi_ab = &ab->wmi_ab;
ab->wmi_ab.ab = ab;
/* TODO: Init remaining resource specific to pdev */
return 0;
}
int ath12k_wmi_attach(struct ath12k_base *ab)
{
int ret;
ret = ath12k_wmi_pdev_attach(ab, 0);
if (ret)
return ret;
ab->wmi_ab.ab = ab;
ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
/* It's overwritten when service_ext_ready is handled */
if (ab->hw_params->single_pdev_only)
ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
/* TODO: Init remaining wmi soc resources required */
init_completion(&ab->wmi_ab.service_ready);
init_completion(&ab->wmi_ab.unified_ready);
return 0;
}
void ath12k_wmi_detach(struct ath12k_base *ab)
{
int i;
/* TODO: Deinit wmi resource specific to SOC as required */
for (i = 0; i < ab->htc.wmi_ep_count; i++)
ath12k_wmi_pdev_detach(ab, i);
ath12k_wmi_free_dbring_caps(ab);
}
| linux-master | drivers/net/wireless/ath/ath12k/wmi.c |
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include "mac.h"
#include "core.h"
#include "debug.h"
#include "wmi.h"
#include "hw.h"
#include "dp_tx.h"
#include "dp_rx.h"
#include "peer.h"
#define CHAN2G(_channel, _freq, _flags) { \
.band = NL80211_BAND_2GHZ, \
.hw_value = (_channel), \
.center_freq = (_freq), \
.flags = (_flags), \
.max_antenna_gain = 0, \
.max_power = 30, \
}
#define CHAN5G(_channel, _freq, _flags) { \
.band = NL80211_BAND_5GHZ, \
.hw_value = (_channel), \
.center_freq = (_freq), \
.flags = (_flags), \
.max_antenna_gain = 0, \
.max_power = 30, \
}
#define CHAN6G(_channel, _freq, _flags) { \
.band = NL80211_BAND_6GHZ, \
.hw_value = (_channel), \
.center_freq = (_freq), \
.flags = (_flags), \
.max_antenna_gain = 0, \
.max_power = 30, \
}
static const struct ieee80211_channel ath12k_2ghz_channels[] = {
CHAN2G(1, 2412, 0),
CHAN2G(2, 2417, 0),
CHAN2G(3, 2422, 0),
CHAN2G(4, 2427, 0),
CHAN2G(5, 2432, 0),
CHAN2G(6, 2437, 0),
CHAN2G(7, 2442, 0),
CHAN2G(8, 2447, 0),
CHAN2G(9, 2452, 0),
CHAN2G(10, 2457, 0),
CHAN2G(11, 2462, 0),
CHAN2G(12, 2467, 0),
CHAN2G(13, 2472, 0),
CHAN2G(14, 2484, 0),
};
static const struct ieee80211_channel ath12k_5ghz_channels[] = {
CHAN5G(36, 5180, 0),
CHAN5G(40, 5200, 0),
CHAN5G(44, 5220, 0),
CHAN5G(48, 5240, 0),
CHAN5G(52, 5260, 0),
CHAN5G(56, 5280, 0),
CHAN5G(60, 5300, 0),
CHAN5G(64, 5320, 0),
CHAN5G(100, 5500, 0),
CHAN5G(104, 5520, 0),
CHAN5G(108, 5540, 0),
CHAN5G(112, 5560, 0),
CHAN5G(116, 5580, 0),
CHAN5G(120, 5600, 0),
CHAN5G(124, 5620, 0),
CHAN5G(128, 5640, 0),
CHAN5G(132, 5660, 0),
CHAN5G(136, 5680, 0),
CHAN5G(140, 5700, 0),
CHAN5G(144, 5720, 0),
CHAN5G(149, 5745, 0),
CHAN5G(153, 5765, 0),
CHAN5G(157, 5785, 0),
CHAN5G(161, 5805, 0),
CHAN5G(165, 5825, 0),
CHAN5G(169, 5845, 0),
CHAN5G(173, 5865, 0),
};
static const struct ieee80211_channel ath12k_6ghz_channels[] = {
CHAN6G(1, 5955, 0),
CHAN6G(5, 5975, 0),
CHAN6G(9, 5995, 0),
CHAN6G(13, 6015, 0),
CHAN6G(17, 6035, 0),
CHAN6G(21, 6055, 0),
CHAN6G(25, 6075, 0),
CHAN6G(29, 6095, 0),
CHAN6G(33, 6115, 0),
CHAN6G(37, 6135, 0),
CHAN6G(41, 6155, 0),
CHAN6G(45, 6175, 0),
CHAN6G(49, 6195, 0),
CHAN6G(53, 6215, 0),
CHAN6G(57, 6235, 0),
CHAN6G(61, 6255, 0),
CHAN6G(65, 6275, 0),
CHAN6G(69, 6295, 0),
CHAN6G(73, 6315, 0),
CHAN6G(77, 6335, 0),
CHAN6G(81, 6355, 0),
CHAN6G(85, 6375, 0),
CHAN6G(89, 6395, 0),
CHAN6G(93, 6415, 0),
CHAN6G(97, 6435, 0),
CHAN6G(101, 6455, 0),
CHAN6G(105, 6475, 0),
CHAN6G(109, 6495, 0),
CHAN6G(113, 6515, 0),
CHAN6G(117, 6535, 0),
CHAN6G(121, 6555, 0),
CHAN6G(125, 6575, 0),
CHAN6G(129, 6595, 0),
CHAN6G(133, 6615, 0),
CHAN6G(137, 6635, 0),
CHAN6G(141, 6655, 0),
CHAN6G(145, 6675, 0),
CHAN6G(149, 6695, 0),
CHAN6G(153, 6715, 0),
CHAN6G(157, 6735, 0),
CHAN6G(161, 6755, 0),
CHAN6G(165, 6775, 0),
CHAN6G(169, 6795, 0),
CHAN6G(173, 6815, 0),
CHAN6G(177, 6835, 0),
CHAN6G(181, 6855, 0),
CHAN6G(185, 6875, 0),
CHAN6G(189, 6895, 0),
CHAN6G(193, 6915, 0),
CHAN6G(197, 6935, 0),
CHAN6G(201, 6955, 0),
CHAN6G(205, 6975, 0),
CHAN6G(209, 6995, 0),
CHAN6G(213, 7015, 0),
CHAN6G(217, 7035, 0),
CHAN6G(221, 7055, 0),
CHAN6G(225, 7075, 0),
CHAN6G(229, 7095, 0),
CHAN6G(233, 7115, 0),
};
static struct ieee80211_rate ath12k_legacy_rates[] = {
{ .bitrate = 10,
.hw_value = ATH12K_HW_RATE_CCK_LP_1M },
{ .bitrate = 20,
.hw_value = ATH12K_HW_RATE_CCK_LP_2M,
.hw_value_short = ATH12K_HW_RATE_CCK_SP_2M,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 55,
.hw_value = ATH12K_HW_RATE_CCK_LP_5_5M,
.hw_value_short = ATH12K_HW_RATE_CCK_SP_5_5M,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 110,
.hw_value = ATH12K_HW_RATE_CCK_LP_11M,
.hw_value_short = ATH12K_HW_RATE_CCK_SP_11M,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 60, .hw_value = ATH12K_HW_RATE_OFDM_6M },
{ .bitrate = 90, .hw_value = ATH12K_HW_RATE_OFDM_9M },
{ .bitrate = 120, .hw_value = ATH12K_HW_RATE_OFDM_12M },
{ .bitrate = 180, .hw_value = ATH12K_HW_RATE_OFDM_18M },
{ .bitrate = 240, .hw_value = ATH12K_HW_RATE_OFDM_24M },
{ .bitrate = 360, .hw_value = ATH12K_HW_RATE_OFDM_36M },
{ .bitrate = 480, .hw_value = ATH12K_HW_RATE_OFDM_48M },
{ .bitrate = 540, .hw_value = ATH12K_HW_RATE_OFDM_54M },
};
static const int
ath12k_phymodes[NUM_NL80211_BANDS][ATH12K_CHAN_WIDTH_NUM] = {
[NL80211_BAND_2GHZ] = {
[NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
[NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
[NL80211_CHAN_WIDTH_20_NOHT] = MODE_11BE_EHT20_2G,
[NL80211_CHAN_WIDTH_20] = MODE_11BE_EHT20_2G,
[NL80211_CHAN_WIDTH_40] = MODE_11BE_EHT40_2G,
[NL80211_CHAN_WIDTH_80] = MODE_UNKNOWN,
[NL80211_CHAN_WIDTH_80P80] = MODE_UNKNOWN,
[NL80211_CHAN_WIDTH_160] = MODE_UNKNOWN,
[NL80211_CHAN_WIDTH_320] = MODE_UNKNOWN,
},
[NL80211_BAND_5GHZ] = {
[NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
[NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
[NL80211_CHAN_WIDTH_20_NOHT] = MODE_11BE_EHT20,
[NL80211_CHAN_WIDTH_20] = MODE_11BE_EHT20,
[NL80211_CHAN_WIDTH_40] = MODE_11BE_EHT40,
[NL80211_CHAN_WIDTH_80] = MODE_11BE_EHT80,
[NL80211_CHAN_WIDTH_160] = MODE_11BE_EHT160,
[NL80211_CHAN_WIDTH_80P80] = MODE_11BE_EHT80_80,
[NL80211_CHAN_WIDTH_320] = MODE_11BE_EHT320,
},
[NL80211_BAND_6GHZ] = {
[NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
[NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
[NL80211_CHAN_WIDTH_20_NOHT] = MODE_11BE_EHT20,
[NL80211_CHAN_WIDTH_20] = MODE_11BE_EHT20,
[NL80211_CHAN_WIDTH_40] = MODE_11BE_EHT40,
[NL80211_CHAN_WIDTH_80] = MODE_11BE_EHT80,
[NL80211_CHAN_WIDTH_160] = MODE_11BE_EHT160,
[NL80211_CHAN_WIDTH_80P80] = MODE_11BE_EHT80_80,
[NL80211_CHAN_WIDTH_320] = MODE_11BE_EHT320,
},
};
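/* The table above resolves an nl80211 band/width pair to a firmware
* phymode. ath12k hardware is 802.11be capable, so every supported
* width maps to an EHT mode and unsupported widths to MODE_UNKNOWN.
*/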
const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default = {
.rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START |
HTT_RX_FILTER_TLV_FLAGS_PPDU_END |
HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE,
.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0,
.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1,
.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2,
.pkt_filter_flags3 = HTT_RX_FP_DATA_FILTER_FLASG3 |
HTT_RX_FP_CTRL_FILTER_FLASG3
};
#define ATH12K_MAC_FIRST_OFDM_RATE_IDX 4
#define ath12k_g_rates ath12k_legacy_rates
#define ath12k_g_rates_size (ARRAY_SIZE(ath12k_legacy_rates))
#define ath12k_a_rates (ath12k_legacy_rates + 4)
#define ath12k_a_rates_size (ARRAY_SIZE(ath12k_legacy_rates) - 4)
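/* The 5/6 GHz bands carry no CCK rates, so the "a" rate table starts
* past the first four (CCK) entries of the legacy table.
*/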
#define ATH12K_MAC_SCAN_TIMEOUT_MSECS 200 /* in msecs */
static const u32 ath12k_smps_map[] = {
[WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
[WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
[WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
};
static int ath12k_start_vdev_delay(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode)
{
switch (mode) {
case MODE_11A:
return "11a";
case MODE_11G:
return "11g";
case MODE_11B:
return "11b";
case MODE_11GONLY:
return "11gonly";
case MODE_11NA_HT20:
return "11na-ht20";
case MODE_11NG_HT20:
return "11ng-ht20";
case MODE_11NA_HT40:
return "11na-ht40";
case MODE_11NG_HT40:
return "11ng-ht40";
case MODE_11AC_VHT20:
return "11ac-vht20";
case MODE_11AC_VHT40:
return "11ac-vht40";
case MODE_11AC_VHT80:
return "11ac-vht80";
case MODE_11AC_VHT160:
return "11ac-vht160";
case MODE_11AC_VHT80_80:
return "11ac-vht80+80";
case MODE_11AC_VHT20_2G:
return "11ac-vht20-2g";
case MODE_11AC_VHT40_2G:
return "11ac-vht40-2g";
case MODE_11AC_VHT80_2G:
return "11ac-vht80-2g";
case MODE_11AX_HE20:
return "11ax-he20";
case MODE_11AX_HE40:
return "11ax-he40";
case MODE_11AX_HE80:
return "11ax-he80";
case MODE_11AX_HE80_80:
return "11ax-he80+80";
case MODE_11AX_HE160:
return "11ax-he160";
case MODE_11AX_HE20_2G:
return "11ax-he20-2g";
case MODE_11AX_HE40_2G:
return "11ax-he40-2g";
case MODE_11AX_HE80_2G:
return "11ax-he80-2g";
case MODE_11BE_EHT20:
return "11be-eht20";
case MODE_11BE_EHT40:
return "11be-eht40";
case MODE_11BE_EHT80:
return "11be-eht80";
case MODE_11BE_EHT80_80:
return "11be-eht80+80";
case MODE_11BE_EHT160:
return "11be-eht160";
case MODE_11BE_EHT160_160:
return "11be-eht160+160";
case MODE_11BE_EHT320:
return "11be-eht320";
case MODE_11BE_EHT20_2G:
return "11be-eht20-2g";
case MODE_11BE_EHT40_2G:
return "11be-eht40-2g";
case MODE_UNKNOWN:
/* skip */
break;
/* no default handler to allow compiler to check that the
* enum is fully handled
*/
}
return "<unknown>";
}
enum rate_info_bw
ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw)
{
u8 ret = RATE_INFO_BW_20;
switch (bw) {
case ATH12K_BW_20:
ret = RATE_INFO_BW_20;
break;
case ATH12K_BW_40:
ret = RATE_INFO_BW_40;
break;
case ATH12K_BW_80:
ret = RATE_INFO_BW_80;
break;
case ATH12K_BW_160:
ret = RATE_INFO_BW_160;
break;
}
return ret;
}
enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw bw)
{
switch (bw) {
case RATE_INFO_BW_20:
return ATH12K_BW_20;
case RATE_INFO_BW_40:
return ATH12K_BW_40;
case RATE_INFO_BW_80:
return ATH12K_BW_80;
case RATE_INFO_BW_160:
return ATH12K_BW_160;
default:
return ATH12K_BW_20;
}
}
int ath12k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
u16 *rate)
{
/* Default to OFDM rates */
int i = ATH12K_MAC_FIRST_OFDM_RATE_IDX;
int max_rates_idx = ath12k_g_rates_size;
if (preamble == WMI_RATE_PREAMBLE_CCK) {
hw_rc &= ~ATH12K_HW_RATECODE_CCK_SHORT_PREAM_MASK;
i = 0;
max_rates_idx = ATH12K_MAC_FIRST_OFDM_RATE_IDX;
}
while (i < max_rates_idx) {
if (hw_rc == ath12k_legacy_rates[i].hw_value) {
*rateidx = i;
*rate = ath12k_legacy_rates[i].bitrate;
return 0;
}
i++;
}
return -EINVAL;
}
u8 ath12k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
u32 bitrate)
{
int i;
for (i = 0; i < sband->n_bitrates; i++)
if (sband->bitrates[i].bitrate == bitrate)
return i;
return 0;
}
static u32
ath12k_mac_max_ht_nss(const u8 *ht_mcs_mask)
{
int nss;
for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
if (ht_mcs_mask[nss])
return nss + 1;
return 1;
}
static u32
ath12k_mac_max_vht_nss(const u16 *vht_mcs_mask)
{
int nss;
for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
if (vht_mcs_mask[nss])
return nss + 1;
return 1;
}
static u8 ath12k_parse_mpdudensity(u8 mpdudensity)
{
/* From IEEE Std 802.11-2020 defined values for "Minimum MPDU Start Spacing":
* 0 for no restriction
* 1 for 1/4 us
* 2 for 1/2 us
* 3 for 1 us
* 4 for 2 us
* 5 for 4 us
* 6 for 8 us
* 7 for 16 us
*/
switch (mpdudensity) {
case 0:
return 0;
case 1:
case 2:
case 3:
/* Our lower layer calculations limit our precision to
* 1 microsecond
*/
return 1;
case 4:
return 2;
case 5:
return 4;
case 6:
return 8;
case 7:
return 16;
default:
return 0;
}
}
static int ath12k_mac_vif_chan(struct ieee80211_vif *vif,
struct cfg80211_chan_def *def)
{
struct ieee80211_chanctx_conf *conf;
rcu_read_lock();
conf = rcu_dereference(vif->bss_conf.chanctx_conf);
if (!conf) {
rcu_read_unlock();
return -ENOENT;
}
*def = conf->def;
rcu_read_unlock();
return 0;
}
static bool ath12k_mac_bitrate_is_cck(int bitrate)
{
switch (bitrate) {
case 10:
case 20:
case 55:
case 110:
return true;
}
return false;
}
u8 ath12k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
u8 hw_rate, bool cck)
{
const struct ieee80211_rate *rate;
int i;
for (i = 0; i < sband->n_bitrates; i++) {
rate = &sband->bitrates[i];
if (ath12k_mac_bitrate_is_cck(rate->bitrate) != cck)
continue;
if (rate->hw_value == hw_rate)
return i;
else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
rate->hw_value_short == hw_rate)
return i;
}
return 0;
}
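/* Legacy rates are encoded for firmware in 500 kbps units (bitrate is
* in 100 kbps), with BIT(7) flagging CCK rates.
*/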
static u8 ath12k_mac_bitrate_to_rate(int bitrate)
{
return DIV_ROUND_UP(bitrate, 5) |
(ath12k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
}
static void ath12k_get_arvif_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct ath12k_vif_iter *arvif_iter = data;
struct ath12k_vif *arvif = (void *)vif->drv_priv;
if (arvif->vdev_id == arvif_iter->vdev_id)
arvif_iter->arvif = arvif;
}
struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id)
{
struct ath12k_vif_iter arvif_iter = {};
u32 flags;
arvif_iter.vdev_id = vdev_id;
flags = IEEE80211_IFACE_ITER_RESUME_ALL;
ieee80211_iterate_active_interfaces_atomic(ar->hw,
flags,
ath12k_get_arvif_iter,
&arvif_iter);
if (!arvif_iter.arvif) {
ath12k_warn(ar->ab, "No VIF found for vdev %d\n", vdev_id);
return NULL;
}
return arvif_iter.arvif;
}
struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab,
u32 vdev_id)
{
int i;
struct ath12k_pdev *pdev;
struct ath12k_vif *arvif;
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
if (pdev && pdev->ar) {
arvif = ath12k_mac_get_arvif(pdev->ar, vdev_id);
if (arvif)
return arvif;
}
}
return NULL;
}
struct ath12k *ath12k_mac_get_ar_by_vdev_id(struct ath12k_base *ab, u32 vdev_id)
{
int i;
struct ath12k_pdev *pdev;
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
if (pdev && pdev->ar) {
if (pdev->ar->allocated_vdev_map & (1LL << vdev_id))
return pdev->ar;
}
}
return NULL;
}
struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id)
{
int i;
struct ath12k_pdev *pdev;
if (ab->hw_params->single_pdev_only) {
pdev = rcu_dereference(ab->pdevs_active[0]);
return pdev ? pdev->ar : NULL;
}
if (WARN_ON(pdev_id > ab->num_radios))
return NULL;
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
if (pdev && pdev->pdev_id == pdev_id)
return (pdev->ar ? pdev->ar : NULL);
}
return NULL;
}
static void ath12k_pdev_caps_update(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
ar->max_tx_power = ab->target_caps.hw_max_tx_power;
/* FIXME: Set min_tx_power to ab->target_caps.hw_min_tx_power.
* But since the value received in svcrdy is the same as
* hw_max_tx_power, set ar->min_tx_power to 0 for now, until
* this is fixed in firmware.
*/
ar->min_tx_power = 0;
ar->txpower_limit_2g = ar->max_tx_power;
ar->txpower_limit_5g = ar->max_tx_power;
ar->txpower_scale = WMI_HOST_TP_SCALE_MAX;
}
static int ath12k_mac_txpower_recalc(struct ath12k *ar)
{
struct ath12k_pdev *pdev = ar->pdev;
struct ath12k_vif *arvif;
int ret, txpower = -1;
u32 param;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->txpower <= 0)
continue;
if (txpower == -1)
txpower = arvif->txpower;
else
txpower = min(txpower, arvif->txpower);
}
if (txpower == -1)
return 0;
/* txpower is set in FW as 2 units per dBm (half-dBm steps) */
txpower = min_t(u32, max_t(u32, ar->min_tx_power, txpower),
ar->max_tx_power) * 2;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "txpower to set in hw %d\n",
txpower / 2);
if ((pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) &&
ar->txpower_limit_2g != txpower) {
param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G;
ret = ath12k_wmi_pdev_set_param(ar, param,
txpower, ar->pdev->pdev_id);
if (ret)
goto fail;
ar->txpower_limit_2g = txpower;
}
if ((pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) &&
ar->txpower_limit_5g != txpower) {
param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
ret = ath12k_wmi_pdev_set_param(ar, param,
txpower, ar->pdev->pdev_id);
if (ret)
goto fail;
ar->txpower_limit_5g = txpower;
}
return 0;
fail:
ath12k_warn(ar->ab, "failed to recalc txpower limit %d using pdev param %d: %d\n",
txpower / 2, param, ret);
return ret;
}
static int ath12k_recalc_rtscts_prot(struct ath12k_vif *arvif)
{
struct ath12k *ar = arvif->ar;
u32 vdev_param, rts_cts;
int ret;
lockdep_assert_held(&ar->conf_mutex);
vdev_param = WMI_VDEV_PARAM_ENABLE_RTSCTS;
/* Enable RTS/CTS protection for sw retries (when legacy stations
* are in BSS) or by default only for second rate series.
* TODO: Check if we need to enable CTS 2 Self in any case
*/
rts_cts = WMI_USE_RTS_CTS;
if (arvif->num_legacy_stations > 0)
rts_cts |= WMI_RTSCTS_ACROSS_SW_RETRIES << 4;
else
rts_cts |= WMI_RTSCTS_FOR_SECOND_RATESERIES << 4;
/* Need not send duplicate param value to firmware */
if (arvif->rtscts_prot_mode == rts_cts)
return 0;
arvif->rtscts_prot_mode = rts_cts;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n",
arvif->vdev_id, rts_cts);
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
vdev_param, rts_cts);
if (ret)
ath12k_warn(ar->ab, "failed to recalculate rts/cts prot for vdev %d: %d\n",
arvif->vdev_id, ret);
return ret;
}
static int ath12k_mac_set_kickout(struct ath12k_vif *arvif)
{
struct ath12k *ar = arvif->ar;
u32 param;
int ret;
ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_STA_KICKOUT_TH,
ATH12K_KICKOUT_THRESHOLD,
ar->pdev->pdev_id);
if (ret) {
ath12k_warn(ar->ab, "failed to set kickout threshold on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
param = WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
ATH12K_KEEPALIVE_MIN_IDLE);
if (ret) {
ath12k_warn(ar->ab, "failed to set keepalive minimum idle time on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
ATH12K_KEEPALIVE_MAX_IDLE);
if (ret) {
ath12k_warn(ar->ab, "failed to set keepalive maximum idle time on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
ATH12K_KEEPALIVE_MAX_UNRESPONSIVE);
if (ret) {
ath12k_warn(ar->ab, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
return 0;
}
void ath12k_mac_peer_cleanup_all(struct ath12k *ar)
{
struct ath12k_peer *peer, *tmp;
struct ath12k_base *ab = ar->ab;
lockdep_assert_held(&ar->conf_mutex);
spin_lock_bh(&ab->base_lock);
list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
ath12k_dp_rx_peer_tid_cleanup(ar, peer);
list_del(&peer->list);
kfree(peer);
}
spin_unlock_bh(&ab->base_lock);
ar->num_peers = 0;
ar->num_stations = 0;
}
static int ath12k_mac_vdev_setup_sync(struct ath12k *ar)
{
lockdep_assert_held(&ar->conf_mutex);
if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev setup timeout %d\n",
ATH12K_VDEV_SETUP_TIMEOUT_HZ);
if (!wait_for_completion_timeout(&ar->vdev_setup_done,
ATH12K_VDEV_SETUP_TIMEOUT_HZ))
return -ETIMEDOUT;
return ar->last_wmi_vdev_start_status ? -EINVAL : 0;
}
static int ath12k_monitor_vdev_up(struct ath12k *ar, int vdev_id)
{
int ret;
ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
if (ret) {
ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
vdev_id, ret);
return ret;
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %i started\n",
vdev_id);
return 0;
}
static int ath12k_mac_monitor_vdev_start(struct ath12k *ar, int vdev_id,
struct cfg80211_chan_def *chandef)
{
struct ieee80211_channel *channel;
struct wmi_vdev_start_req_arg arg = {};
int ret;
lockdep_assert_held(&ar->conf_mutex);
channel = chandef->chan;
arg.vdev_id = vdev_id;
arg.freq = channel->center_freq;
arg.band_center_freq1 = chandef->center_freq1;
arg.band_center_freq2 = chandef->center_freq2;
arg.mode = ath12k_phymodes[chandef->chan->band][chandef->width];
arg.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR);
arg.min_power = 0;
arg.max_power = channel->max_power;
arg.max_reg_power = channel->max_reg_power;
arg.max_antenna_gain = channel->max_antenna_gain;
arg.pref_tx_streams = ar->num_tx_chains;
arg.pref_rx_streams = ar->num_rx_chains;
arg.punct_bitmap = 0xFFFFFFFF;
arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
reinit_completion(&ar->vdev_setup_done);
reinit_completion(&ar->vdev_delete_done);
ret = ath12k_wmi_vdev_start(ar, &arg, false);
if (ret) {
ath12k_warn(ar->ab, "failed to request monitor vdev %i start: %d\n",
vdev_id, ret);
return ret;
}
ret = ath12k_mac_vdev_setup_sync(ar);
if (ret) {
ath12k_warn(ar->ab, "failed to synchronize setup for monitor vdev %i start: %d\n",
vdev_id, ret);
return ret;
}
ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
if (ret) {
ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
vdev_id, ret);
goto vdev_stop;
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %i started\n",
vdev_id);
return 0;
vdev_stop:
ret = ath12k_wmi_vdev_stop(ar, vdev_id);
if (ret)
ath12k_warn(ar->ab, "failed to stop monitor vdev %i after start failure: %d\n",
vdev_id, ret);
return ret;
}
static int ath12k_mac_monitor_vdev_stop(struct ath12k *ar)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->vdev_setup_done);
ret = ath12k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
ath12k_warn(ar->ab, "failed to request monitor vdev %i stop: %d\n",
ar->monitor_vdev_id, ret);
ret = ath12k_mac_vdev_setup_sync(ar);
if (ret)
ath12k_warn(ar->ab, "failed to synchronize monitor vdev %i stop: %d\n",
ar->monitor_vdev_id, ret);
ret = ath12k_wmi_vdev_down(ar, ar->monitor_vdev_id);
if (ret)
ath12k_warn(ar->ab, "failed to put down monitor vdev %i: %d\n",
ar->monitor_vdev_id, ret);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %i stopped\n",
ar->monitor_vdev_id);
return ret;
}
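/* Create a monitor-type vdev on this pdev, allocating a free vdev id from
 * the shared bitmap. The advertised 2 GHz/5 GHz chain counts follow the
 * currently configured TX/RX chains, and NSS is derived from the TX
 * chainmask weight (defaulting to 1 when the mask is empty).
 */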
static int ath12k_mac_monitor_vdev_create(struct ath12k *ar)
{
struct ath12k_pdev *pdev = ar->pdev;
struct ath12k_wmi_vdev_create_arg arg = {};
int bit, ret;
u8 tmp_addr[6] = {}; /* zero-initialized so no stack garbage is sent as the vdev MAC */
u16 nss;
lockdep_assert_held(&ar->conf_mutex);
if (ar->monitor_vdev_created)
return 0;
if (ar->ab->free_vdev_map == 0) {
ath12k_warn(ar->ab, "failed to find free vdev id for monitor vdev\n");
return -ENOMEM;
}
bit = __ffs64(ar->ab->free_vdev_map);
ar->monitor_vdev_id = bit;
arg.if_id = ar->monitor_vdev_id;
arg.type = WMI_VDEV_TYPE_MONITOR;
arg.subtype = WMI_VDEV_SUBTYPE_NONE;
arg.pdev_id = pdev->pdev_id;
arg.if_stats_id = ATH12K_INVAL_VDEV_STATS_ID;
if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
arg.chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
arg.chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
}
if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
arg.chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
arg.chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
}
ret = ath12k_wmi_vdev_create(ar, tmp_addr, &arg);
if (ret) {
ath12k_warn(ar->ab, "failed to request monitor vdev %i creation: %d\n",
ar->monitor_vdev_id, ret);
ar->monitor_vdev_id = -1;
return ret;
}
nss = hweight32(ar->cfg_tx_chainmask) ? : 1;
ret = ath12k_wmi_vdev_set_param_cmd(ar, ar->monitor_vdev_id,
WMI_VDEV_PARAM_NSS, nss);
if (ret) {
ath12k_warn(ar->ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n",
ar->monitor_vdev_id, ar->cfg_tx_chainmask, nss, ret);
return ret;
}
ret = ath12k_mac_txpower_recalc(ar);
if (ret)
return ret;
ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id;
ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
ar->num_created_vdevs++;
ar->monitor_vdev_created = true;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %d created\n",
ar->monitor_vdev_id);
return 0;
}
static int ath12k_mac_monitor_vdev_delete(struct ath12k *ar)
{
int ret;
unsigned long time_left;
lockdep_assert_held(&ar->conf_mutex);
if (!ar->monitor_vdev_created)
return 0;
reinit_completion(&ar->vdev_delete_done);
ret = ath12k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
if (ret) {
ath12k_warn(ar->ab, "failed to request wmi monitor vdev %i removal: %d\n",
ar->monitor_vdev_id, ret);
return ret;
}
time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
ATH12K_VDEV_DELETE_TIMEOUT_HZ);
if (time_left == 0) {
ath12k_warn(ar->ab, "Timeout in receiving vdev delete response\n");
} else {
ar->allocated_vdev_map &= ~(1LL << ar->monitor_vdev_id);
ar->ab->free_vdev_map |= 1LL << (ar->monitor_vdev_id);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %d deleted\n",
ar->monitor_vdev_id);
ar->num_created_vdevs--;
ar->monitor_vdev_id = -1;
ar->monitor_vdev_created = false;
}
return ret;
}
static void
ath12k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *conf,
void *data)
{
struct cfg80211_chan_def **def = data;
*def = &conf->def;
}
static int ath12k_mac_monitor_start(struct ath12k *ar)
{
struct cfg80211_chan_def *chandef = NULL;
int ret;
lockdep_assert_held(&ar->conf_mutex);
if (ar->monitor_started)
return 0;
ieee80211_iter_chan_contexts_atomic(ar->hw,
ath12k_mac_get_any_chandef_iter,
&chandef);
if (!chandef)
return 0;
ret = ath12k_mac_monitor_vdev_start(ar, ar->monitor_vdev_id, chandef);
if (ret) {
ath12k_warn(ar->ab, "failed to start monitor vdev: %d\n", ret);
ath12k_mac_monitor_vdev_delete(ar);
return ret;
}
ar->monitor_started = true;
ar->num_started_vdevs++;
ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, false);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor started ret %d\n", ret);
return ret;
}
static int ath12k_mac_monitor_stop(struct ath12k *ar)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
if (!ar->monitor_started)
return 0;
ret = ath12k_mac_monitor_vdev_stop(ar);
if (ret) {
ath12k_warn(ar->ab, "failed to stop monitor vdev: %d\n", ret);
return ret;
}
ar->monitor_started = false;
ar->num_started_vdevs--;
ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, true);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor stopped ret %d\n", ret);
return ret;
}
static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath12k *ar = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
int ret = 0;
mutex_lock(&ar->conf_mutex);
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
ar->monitor_conf_enabled = conf->flags & IEEE80211_CONF_MONITOR;
if (ar->monitor_conf_enabled) {
if (ar->monitor_vdev_created)
goto exit;
ret = ath12k_mac_monitor_vdev_create(ar);
if (ret)
goto exit;
ret = ath12k_mac_monitor_start(ar);
if (ret)
goto err_mon_del;
} else {
if (!ar->monitor_vdev_created)
goto exit;
ret = ath12k_mac_monitor_stop(ar);
if (ret)
goto exit;
ath12k_mac_monitor_vdev_delete(ar);
}
}
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
err_mon_del:
ath12k_mac_monitor_vdev_delete(ar);
mutex_unlock(&ar->conf_mutex);
return ret;
}
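/* Fetch the current beacon template from mac80211 and hand it to the
 * firmware. While at it, note whether RSN and/or vendor WPA IEs are
 * present in the beacon; ath12k_peer_assoc_h_crypto() later uses these
 * flags to request the 4-way PTK / 2-way GTK handshake behavior.
 */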
static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
{
struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
struct ieee80211_hw *hw = ar->hw;
struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_mutable_offsets offs = {};
struct sk_buff *bcn;
struct ieee80211_mgmt *mgmt;
u8 *ies;
int ret;
if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
return 0;
bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
if (!bcn) {
ath12k_warn(ab, "failed to get beacon template from mac80211\n");
return -EPERM;
}
ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn);
ies += sizeof(mgmt->u.beacon);
if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies)))
arvif->rsnie_present = true;
if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPA,
ies, (skb_tail_pointer(bcn) - ies)))
arvif->wpaie_present = true;
ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
kfree_skb(bcn);
if (ret)
ath12k_warn(ab, "failed to submit beacon template command: %d\n",
ret);
return ret;
}
static void ath12k_control_beaconing(struct ath12k_vif *arvif,
struct ieee80211_bss_conf *info)
{
struct ath12k *ar = arvif->ar;
int ret;
lockdep_assert_held(&arvif->ar->conf_mutex);
if (!info->enable_beacon) {
ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id);
if (ret)
ath12k_warn(ar->ab, "failed to down vdev_id %i: %d\n",
arvif->vdev_id, ret);
arvif->is_up = false;
return;
}
/* Install the beacon template to the FW */
ret = ath12k_mac_setup_bcn_tmpl(arvif);
if (ret) {
ath12k_warn(ar->ab, "failed to update bcn tmpl during vdev up: %d\n",
ret);
return;
}
arvif->aid = 0;
ether_addr_copy(arvif->bssid, info->bssid);
ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid);
if (ret) {
ath12k_warn(ar->ab, "failed to bring up vdev %d: %i\n",
arvif->vdev_id, ret);
return;
}
arvif->is_up = true;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
}
static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
struct ath12k_vif *arvif = (void *)vif->drv_priv;
u32 aid;
lockdep_assert_held(&ar->conf_mutex);
if (vif->type == NL80211_IFTYPE_STATION)
aid = vif->cfg.aid;
else
aid = sta->aid;
ether_addr_copy(arg->peer_mac, sta->addr);
arg->vdev_id = arvif->vdev_id;
arg->peer_associd = aid;
arg->auth_flag = true;
/* TODO: STA WAR in ath10k for listen interval required? */
arg->peer_listen_intval = ar->hw->conf.listen_interval;
arg->peer_nss = 1;
arg->peer_caps = vif->bss_conf.assoc_capability;
}
static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
struct ieee80211_bss_conf *info = &vif->bss_conf;
struct cfg80211_chan_def def;
struct cfg80211_bss *bss;
struct ath12k_vif *arvif = (struct ath12k_vif *)vif->drv_priv;
const u8 *rsnie = NULL;
const u8 *wpaie = NULL;
lockdep_assert_held(&ar->conf_mutex);
if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
return;
bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
if (arvif->rsnie_present || arvif->wpaie_present) {
arg->need_ptk_4_way = true;
if (arvif->wpaie_present)
arg->need_gtk_2_way = true;
} else if (bss) {
const struct cfg80211_bss_ies *ies;
rcu_read_lock();
rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
ies = rcu_dereference(bss->ies);
wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPA,
ies->data,
ies->len);
rcu_read_unlock();
cfg80211_put_bss(ar->hw->wiphy, bss);
}
/* FIXME: is keying off RSN IE/WPA IE presence the correct approach? */
if (rsnie || wpaie) {
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"%s: rsn ie found\n", __func__);
arg->need_ptk_4_way = true;
}
if (wpaie) {
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"%s: wpa ie found\n", __func__);
arg->need_gtk_2_way = true;
}
if (sta->mfp) {
/* TODO: Need to check if FW supports PMF? */
arg->is_pmf_enabled = true;
}
/* TODO: safe_mode_enabled (bypass 4-way handshake) flag req? */
}
static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
struct ath12k_vif *arvif = (void *)vif->drv_priv;
struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
struct cfg80211_chan_def def;
const struct ieee80211_supported_band *sband;
const struct ieee80211_rate *rates;
enum nl80211_band band;
u32 ratemask;
u8 rate;
int i;
lockdep_assert_held(&ar->conf_mutex);
if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
return;
band = def.chan->band;
sband = ar->hw->wiphy->bands[band];
ratemask = sta->deflink.supp_rates[band];
ratemask &= arvif->bitrate_mask.control[band].legacy;
rates = sband->bitrates;
rateset->num_rates = 0;
for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
if (!(ratemask & 1))
continue;
rate = ath12k_mac_bitrate_to_rate(rates->bitrate);
rateset->rates[rateset->num_rates] = rate;
rateset->num_rates++;
}
}
static bool
ath12k_peer_assoc_h_ht_masked(const u8 *ht_mcs_mask)
{
int nss;
for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
if (ht_mcs_mask[nss])
return false;
return true;
}
static bool
ath12k_peer_assoc_h_vht_masked(const u16 *vht_mcs_mask)
{
int nss;
for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
if (vht_mcs_mask[nss])
return false;
return true;
}
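/* Fill the HT portion of the peer assoc command: A-MPDU limits, LDPC,
 * STBC and short-GI rate-control flags, plus the HT MCS set filtered
 * through the user-configured bitrate mask. The resulting NSS is capped
 * by the highest MCS group both the peer and the mask allow.
 */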
static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
struct ath12k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u8 *ht_mcs_mask;
int i, n;
u8 max_nss;
u32 stbc;
lockdep_assert_held(&ar->conf_mutex);
if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
return;
if (!ht_cap->ht_supported)
return;
band = def.chan->band;
ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
if (ath12k_peer_assoc_h_ht_masked(ht_mcs_mask))
return;
arg->ht_flag = true;
arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
ht_cap->ampdu_factor)) - 1;
arg->peer_mpdu_density =
ath12k_parse_mpdudensity(ht_cap->ampdu_density);
arg->peer_ht_caps = ht_cap->cap;
arg->peer_rate_caps |= WMI_HOST_RC_HT_FLAG;
if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
arg->ldpc_flag = true;
if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) {
arg->bw_40 = true;
arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
}
if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40))
arg->peer_rate_caps |= WMI_HOST_RC_SGI_FLAG;
}
if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
arg->peer_rate_caps |= WMI_HOST_RC_TX_STBC_FLAG;
arg->stbc_flag = true;
}
if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
stbc = stbc << WMI_HOST_RC_RX_STBC_FLAG_S;
arg->peer_rate_caps |= stbc;
arg->stbc_flag = true;
}
if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
arg->peer_rate_caps |= WMI_HOST_RC_TS_FLAG;
else if (ht_cap->mcs.rx_mask[1])
arg->peer_rate_caps |= WMI_HOST_RC_DS_FLAG;
for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
(ht_mcs_mask[i / 8] & BIT(i % 8))) {
max_nss = (i / 8) + 1;
arg->peer_ht_rates.rates[n++] = i;
}
/* This is a workaround for HT-enabled STAs which break the spec
* and have no HT capabilities RX mask (no HT RX MCS map).
*
* As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
* MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
*
* Firmware asserts if such situation occurs.
*/
if (n == 0) {
arg->peer_ht_rates.num_rates = 8;
for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
arg->peer_ht_rates.rates[i] = i;
} else {
arg->peer_ht_rates.num_rates = n;
arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
arg->peer_mac,
arg->peer_ht_rates.num_rates,
arg->peer_nss);
}
static int ath12k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
{
switch ((mcs_map >> (2 * nss)) & 0x3) {
case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
}
return 0;
}
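/* Clamp the peer's VHT TX MCS map against the per-NSS MCS limits from the
 * bitrate mask. For each NSS, the intersection of the peer map and the
 * limit collapses to the nearest standard "MCS 0-7/0-8/0-9" encoding; an
 * empty intersection becomes IEEE80211_VHT_MCS_NOT_SUPPORTED. For example,
 * a peer advertising MCS 0-9 on a stream limited to MCS 0-7 by the mask
 * ends up encoded as IEEE80211_VHT_MCS_SUPPORT_0_7 for that stream.
 */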
static u16
ath12k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
{
int idx_limit;
int nss;
u16 mcs_map;
u16 mcs;
for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
mcs_map = ath12k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
vht_mcs_limit[nss];
if (mcs_map)
idx_limit = fls(mcs_map) - 1;
else
idx_limit = -1;
switch (idx_limit) {
case 0:
case 1:
case 2:
case 3:
case 4:
case 5:
case 6:
case 7:
mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
break;
case 8:
mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
break;
case 9:
mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
break;
default:
WARN_ON(1);
fallthrough;
case -1:
mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
break;
}
tx_mcs_set &= ~(0x3 << (nss * 2));
tx_mcs_set |= mcs << (nss * 2);
}
return tx_mcs_set;
}
static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
struct ath12k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u16 *vht_mcs_mask;
u16 tx_mcs_map;
u8 ampdu_factor;
u8 max_nss, vht_mcs;
int i;
if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
return;
if (!vht_cap->vht_supported)
return;
band = def.chan->band;
vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
if (ath12k_peer_assoc_h_vht_masked(vht_mcs_mask))
return;
arg->vht_flag = true;
/* TODO: similar flags required? */
arg->vht_capable = true;
if (def.chan->band == NL80211_BAND_2GHZ)
arg->vht_ng_flag = true;
arg->peer_vht_caps = vht_cap->cap;
ampdu_factor = (vht_cap->cap &
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
/* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
* zero in VHT IE. Using it would result in degraded throughput.
* arg->peer_max_mpdu at this point contains HT max_mpdu so keep
* it if VHT max_mpdu is smaller.
*/
arg->peer_max_mpdu = max(arg->peer_max_mpdu,
(1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1);
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
arg->bw_80 = true;
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
arg->bw_160 = true;
/* Calculate peer NSS capability from VHT capabilities if STA
* supports VHT.
*/
for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
(2 * i) & 3;
if (vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED &&
vht_mcs_mask[i])
max_nss = i + 1;
}
arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
tx_mcs_map = __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
arg->tx_mcs_set = ath12k_peer_assoc_h_vht_limit(tx_mcs_map, vht_mcs_mask);
/* In the QCN9274 platform, VHT MCS rates 10 and 11 are enabled by default.
 * MCS rates 10 and 11 are not part of the 11ac standard, so explicitly
 * disable them in 11ac mode.
 */
arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK;
arg->tx_mcs_set |= IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11;
if ((arg->tx_mcs_set & IEEE80211_VHT_MCS_NOT_SUPPORTED) ==
IEEE80211_VHT_MCS_NOT_SUPPORTED)
arg->peer_vht_caps &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
/* TODO: Check */
arg->tx_max_mcs_nss = 0xFF;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
sta->addr, arg->peer_max_mpdu, arg->peer_flags);
/* TODO: rxnss_override */
}
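/* Fill the HE portion of the peer assoc command: MAC/PHY capabilities,
 * HE operation parameters (with the BSS color byte masked off), A-MPDU
 * length recalculated from the HE A-MPDU exponent extension, PPE
 * thresholds, TWT roles and the per-bandwidth RX/TX MCS sets.
 */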
static void ath12k_peer_assoc_h_he(struct ath12k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
int i;
u8 ampdu_factor, max_nss;
/* default to "not supported" in case the peer advertises no usable MCS */
u8 rx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
u8 rx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
u16 mcs_160_map, mcs_80_map;
bool support_160;
u16 v;
if (!he_cap->has_he)
return;
arg->he_flag = true;
support_160 = !!(he_cap->he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G);
/* The supported HE-MCS and NSS set of the peer is the intersection of peer and self HE capabilities */
mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
if (support_160) {
for (i = 7; i >= 0; i--) {
u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;
if (mcs_160 != IEEE80211_HE_MCS_NOT_SUPPORTED) {
rx_mcs_160 = i + 1;
break;
}
}
}
for (i = 7; i >= 0; i--) {
u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;
if (mcs_80 != IEEE80211_HE_MCS_NOT_SUPPORTED) {
rx_mcs_80 = i + 1;
break;
}
}
if (support_160)
max_nss = min(rx_mcs_80, rx_mcs_160);
else
max_nss = rx_mcs_80;
arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info,
sizeof(he_cap->he_cap_elem.mac_cap_info));
memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info,
sizeof(he_cap->he_cap_elem.phy_cap_info));
arg->peer_he_ops = vif->bss_conf.he_oper.params;
/* the top most byte is used to indicate BSS color info */
arg->peer_he_ops &= 0xffffff;
/* As per section 26.6.1 IEEE Std 802.11ax‐2022, if the Max AMPDU
* Exponent Extension in HE cap is zero, use the arg->peer_max_mpdu
* as calculated while parsing VHT caps(if VHT caps is present)
* or HT caps (if VHT caps is not present).
*
* For non-zero value of Max AMPDU Exponent Extension in HE MAC caps,
* if a HE STA sends VHT cap and HE cap IE in assoc request then, use
* MAX_AMPDU_LEN_FACTOR as 20 to calculate max_ampdu length.
* If a HE STA that does not send VHT cap, but HE and HT cap in assoc
* request, then use MAX_AMPDU_LEN_FACTOR as 16 to calculate max_ampdu
* length.
*/
ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3],
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
if (ampdu_factor) {
if (sta->deflink.vht_cap.vht_supported)
arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1;
else if (sta->deflink.ht_cap.ht_supported)
arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1;
}
if (he_cap->he_cap_elem.phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
int bit = 7;
int nss, ru;
arg->peer_ppet.numss_m1 = he_cap->ppe_thres[0] &
IEEE80211_PPE_THRES_NSS_MASK;
arg->peer_ppet.ru_bit_mask =
(he_cap->ppe_thres[0] &
IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
for (nss = 0; nss <= arg->peer_ppet.numss_m1; nss++) {
for (ru = 0; ru < 4; ru++) {
u32 val = 0;
int i;
if ((arg->peer_ppet.ru_bit_mask & BIT(ru)) == 0)
continue;
for (i = 0; i < 6; i++) {
val >>= 1;
val |= ((he_cap->ppe_thres[bit / 8] >>
(bit % 8)) & 0x1) << 5;
bit++;
}
arg->peer_ppet.ppet16_ppet8_ru3_ru0[nss] |=
val << (ru * 6);
}
}
}
if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_RES)
arg->twt_responder = true;
if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ)
arg->twt_requester = true;
switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (he_cap->he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80);
arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80);
arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
arg->peer_he_mcs_count++;
}
v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160);
arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
arg->peer_he_mcs_count++;
fallthrough;
default:
v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80);
arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
arg->peer_he_mcs_count++;
break;
}
}
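/* Mirror the peer's HT SM power save mode into the assoc flags: static
 * and dynamic MIMO PS map to their dedicated flags, while "disabled"
 * means full spatial multiplexing is allowed.
 */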
static void ath12k_peer_assoc_h_smps(struct ieee80211_sta *sta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
int smps;
if (!ht_cap->ht_supported)
return;
smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
switch (smps) {
case WLAN_HT_CAP_SM_PS_STATIC:
arg->static_mimops_flag = true;
break;
case WLAN_HT_CAP_SM_PS_DYNAMIC:
arg->dynamic_mimops_flag = true;
break;
case WLAN_HT_CAP_SM_PS_DISABLED:
arg->spatial_mux_flag = true;
break;
default:
break;
}
}
static void ath12k_peer_assoc_h_qos(struct ath12k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
struct ath12k_vif *arvif = (void *)vif->drv_priv;
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_AP:
if (sta->wme) {
/* TODO: Check WME vs QoS */
arg->is_wme_set = true;
arg->qos_flag = true;
}
if (sta->wme && sta->uapsd_queues) {
/* TODO: Check WME vs QoS */
arg->is_wme_set = true;
arg->apsd_flag = true;
arg->peer_rate_caps |= WMI_HOST_RC_UAPSD_FLAG;
}
break;
case WMI_VDEV_TYPE_STA:
if (sta->wme) {
arg->is_wme_set = true;
arg->qos_flag = true;
}
break;
default:
break;
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM qos %d\n",
sta->addr, arg->qos_flag);
}
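/* Push AP power-save parameters for an associating QoS station: per-AC
 * U-APSD delivery/trigger bits, the capped maximum service period, and
 * SIFS response trigger frame types (currently disabled, pending testing
 * as noted below).
 */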
static int ath12k_peer_assoc_qos_ap(struct ath12k *ar,
struct ath12k_vif *arvif,
struct ieee80211_sta *sta)
{
struct ath12k_wmi_ap_ps_arg arg;
u32 max_sp;
u32 uapsd;
int ret;
lockdep_assert_held(&ar->conf_mutex);
arg.vdev_id = arvif->vdev_id;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
sta->uapsd_queues, sta->max_sp);
uapsd = 0;
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
max_sp = 0;
if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
max_sp = sta->max_sp;
arg.param = WMI_AP_PS_PEER_PARAM_UAPSD;
arg.value = uapsd;
ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
if (ret)
goto err;
arg.param = WMI_AP_PS_PEER_PARAM_MAX_SP;
arg.value = max_sp;
ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
if (ret)
goto err;
/* TODO: revisit during testing */
arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE;
arg.value = DISABLE_SIFS_RESPONSE_TRIGGER;
ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
if (ret)
goto err;
arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD;
arg.value = DISABLE_SIFS_RESPONSE_TRIGGER;
ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
if (ret)
goto err;
return 0;
err:
ath12k_warn(ar->ab, "failed to set ap ps peer param %d for vdev %i: %d\n",
arg.param, arvif->vdev_id, ret);
return ret;
}
static bool ath12k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
{
return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >>
ATH12K_MAC_FIRST_OFDM_RATE_IDX;
}
static enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ath12k *ar,
struct ieee80211_sta *sta)
{
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
switch (sta->deflink.vht_cap.cap &
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
return MODE_11AC_VHT160;
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
return MODE_11AC_VHT80_80;
default:
/* not sure if this is a valid case? */
return MODE_11AC_VHT160;
}
}
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11AC_VHT80;
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11AC_VHT40;
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11AC_VHT20;
return MODE_UNKNOWN;
}
static enum wmi_phy_mode ath12k_mac_get_phymode_he(struct ath12k *ar,
struct ieee80211_sta *sta)
{
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
return MODE_11AX_HE160;
else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
return MODE_11AX_HE80_80;
/* not sure if this is a valid case? */
return MODE_11AX_HE160;
}
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11AX_HE80;
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11AX_HE40;
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11AX_HE20;
return MODE_UNKNOWN;
}
static enum wmi_phy_mode ath12k_mac_get_phymode_eht(struct ath12k *ar,
struct ieee80211_sta *sta)
{
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320)
if (sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[0] &
IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
return MODE_11BE_EHT320;
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
return MODE_11BE_EHT160;
if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
return MODE_11BE_EHT80_80;
ath12k_warn(ar->ab, "invalid EHT PHY capability info for 160 Mhz: %d\n",
sta->deflink.he_cap.he_cap_elem.phy_cap_info[0]);
return MODE_11BE_EHT160;
}
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11BE_EHT80;
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11BE_EHT40;
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11BE_EHT20;
return MODE_UNKNOWN;
}
static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
struct ath12k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
enum wmi_phy_mode phymode = MODE_UNKNOWN;
if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
return;
band = def.chan->band;
ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
switch (band) {
case NL80211_BAND_2GHZ:
if (sta->deflink.eht_cap.has_eht) {
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11BE_EHT40_2G;
else
phymode = MODE_11BE_EHT20_2G;
} else if (sta->deflink.he_cap.has_he) {
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
phymode = MODE_11AX_HE80_2G;
else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AX_HE40_2G;
else
phymode = MODE_11AX_HE20_2G;
} else if (sta->deflink.vht_cap.vht_supported &&
!ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AC_VHT40;
else
phymode = MODE_11AC_VHT20;
} else if (sta->deflink.ht_cap.ht_supported &&
!ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11NG_HT40;
else
phymode = MODE_11NG_HT20;
} else if (ath12k_mac_sta_has_ofdm_only(sta)) {
phymode = MODE_11G;
} else {
phymode = MODE_11B;
}
break;
case NL80211_BAND_5GHZ:
case NL80211_BAND_6GHZ:
/* Check EHT first */
if (sta->deflink.eht_cap.has_eht) {
phymode = ath12k_mac_get_phymode_eht(ar, sta);
} else if (sta->deflink.he_cap.has_he) {
phymode = ath12k_mac_get_phymode_he(ar, sta);
} else if (sta->deflink.vht_cap.vht_supported &&
!ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
phymode = ath12k_mac_get_phymode_vht(ar, sta);
} else if (sta->deflink.ht_cap.ht_supported &&
!ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40)
phymode = MODE_11NA_HT40;
else
phymode = MODE_11NA_HT20;
} else {
phymode = MODE_11A;
}
break;
default:
break;
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM phymode %s\n",
sta->addr, ath12k_mac_phymode_str(phymode));
arg->peer_phymode = phymode;
WARN_ON(phymode == MODE_UNKNOWN);
}
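/* Repack the four per-MCS-group "max NSS" octets from the EHT capability
 * element into the WMI RX and TX MCS/NSS words: NSS for MCS 0-7, 8-9,
 * 10-11 and 12-13 respectively.
 */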
static void ath12k_mac_set_eht_mcs(u8 rx_tx_mcs7, u8 rx_tx_mcs9,
u8 rx_tx_mcs11, u8 rx_tx_mcs13,
u32 *rx_mcs, u32 *tx_mcs)
{
*rx_mcs = 0;
u32p_replace_bits(rx_mcs,
u8_get_bits(rx_tx_mcs7, IEEE80211_EHT_MCS_NSS_RX),
WMI_EHT_MCS_NSS_0_7);
u32p_replace_bits(rx_mcs,
u8_get_bits(rx_tx_mcs9, IEEE80211_EHT_MCS_NSS_RX),
WMI_EHT_MCS_NSS_8_9);
u32p_replace_bits(rx_mcs,
u8_get_bits(rx_tx_mcs11, IEEE80211_EHT_MCS_NSS_RX),
WMI_EHT_MCS_NSS_10_11);
u32p_replace_bits(rx_mcs,
u8_get_bits(rx_tx_mcs13, IEEE80211_EHT_MCS_NSS_RX),
WMI_EHT_MCS_NSS_12_13);
*tx_mcs = 0;
u32p_replace_bits(tx_mcs,
u8_get_bits(rx_tx_mcs7, IEEE80211_EHT_MCS_NSS_TX),
WMI_EHT_MCS_NSS_0_7);
u32p_replace_bits(tx_mcs,
u8_get_bits(rx_tx_mcs9, IEEE80211_EHT_MCS_NSS_TX),
WMI_EHT_MCS_NSS_8_9);
u32p_replace_bits(tx_mcs,
u8_get_bits(rx_tx_mcs11, IEEE80211_EHT_MCS_NSS_TX),
WMI_EHT_MCS_NSS_10_11);
u32p_replace_bits(tx_mcs,
u8_get_bits(rx_tx_mcs13, IEEE80211_EHT_MCS_NSS_TX),
WMI_EHT_MCS_NSS_12_13);
}
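/* Unpack the EHT PPE thresholds field. The first 9 bits carry NSS-1 and
 * the RU index bitmask; after the header, each present (NSS, RU) pair
 * contributes a 6-bit PPET16/PPET8 value read LSB-first across byte
 * boundaries. Per NSS, the values are packed 6 bits per RU into
 * ppet16_ppet8_ru3_ru0.
 */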
static void ath12k_mac_set_eht_ppe_threshold(const u8 *ppe_thres,
struct ath12k_wmi_ppe_threshold_arg *ppet)
{
u32 bit_pos = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE, val;
u8 nss, ru, i;
u8 ppet_bit_len_per_ru = IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2;
ppet->numss_m1 = u8_get_bits(ppe_thres[0], IEEE80211_EHT_PPE_THRES_NSS_MASK);
ppet->ru_bit_mask = u16_get_bits(get_unaligned_le16(ppe_thres),
IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
for (nss = 0; nss <= ppet->numss_m1; nss++) {
for (ru = 0;
ru < hweight16(IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
ru++) {
if ((ppet->ru_bit_mask & BIT(ru)) == 0)
continue;
val = 0;
for (i = 0; i < ppet_bit_len_per_ru; i++) {
val |= (((ppe_thres[bit_pos / 8] >>
(bit_pos % 8)) & 0x1) << i);
bit_pos++;
}
ppet->ppet16_ppet8_ru3_ru0[nss] |=
(val << (ru * ppet_bit_len_per_ru));
}
}
}
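/* Fill the EHT portion of the peer assoc command: PPE thresholds (when
 * advertised), MAC/PHY capabilities and the per-bandwidth MCS/NSS sets.
 * The puncturing bitmap is inverted, apparently to match a WMI convention
 * of flagging punctured rather than usable subchannels.
 */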
static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
const struct ieee80211_sta_eht_cap *eht_cap = &sta->deflink.eht_cap;
const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
const struct ieee80211_eht_mcs_nss_supp_20mhz_only *bw_20;
const struct ieee80211_eht_mcs_nss_supp_bw *bw;
struct ath12k_vif *arvif = (struct ath12k_vif *)vif->drv_priv;
u32 *rx_mcs, *tx_mcs;
if (!sta->deflink.he_cap.has_he || !eht_cap->has_eht)
return;
arg->eht_flag = true;
if ((eht_cap->eht_cap_elem.phy_cap_info[5] &
IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) &&
eht_cap->eht_ppe_thres[0] != 0)
ath12k_mac_set_eht_ppe_threshold(eht_cap->eht_ppe_thres,
&arg->peer_eht_ppet);
memcpy(arg->peer_eht_cap_mac, eht_cap->eht_cap_elem.mac_cap_info,
sizeof(eht_cap->eht_cap_elem.mac_cap_info));
memcpy(arg->peer_eht_cap_phy, eht_cap->eht_cap_elem.phy_cap_info,
sizeof(eht_cap->eht_cap_elem.phy_cap_info));
rx_mcs = arg->peer_eht_rx_mcs_set;
tx_mcs = arg->peer_eht_tx_mcs_set;
switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_320:
bw = &eht_cap->eht_mcs_nss_supp.bw._320;
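/* The bandwidth-specific MCS/NSS struct carries no separate MCS 0-7
 * octet, so the MCS 0-9 value is reused for the 0-7 group here and in
 * the 160 MHz case below.
 */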
ath12k_mac_set_eht_mcs(bw->rx_tx_mcs9_max_nss,
bw->rx_tx_mcs9_max_nss,
bw->rx_tx_mcs11_max_nss,
bw->rx_tx_mcs13_max_nss,
&rx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_320],
&tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_320]);
arg->peer_eht_mcs_count++;
fallthrough;
case IEEE80211_STA_RX_BW_160:
bw = &eht_cap->eht_mcs_nss_supp.bw._160;
ath12k_mac_set_eht_mcs(bw->rx_tx_mcs9_max_nss,
bw->rx_tx_mcs9_max_nss,
bw->rx_tx_mcs11_max_nss,
bw->rx_tx_mcs13_max_nss,
&rx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_160],
&tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_160]);
arg->peer_eht_mcs_count++;
fallthrough;
default:
if ((he_cap->he_cap_elem.phy_cap_info[0] &
(IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)) == 0) {
bw_20 = &eht_cap->eht_mcs_nss_supp.only_20mhz;
ath12k_mac_set_eht_mcs(bw_20->rx_tx_mcs7_max_nss,
bw_20->rx_tx_mcs9_max_nss,
bw_20->rx_tx_mcs11_max_nss,
bw_20->rx_tx_mcs13_max_nss,
&rx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80],
&tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80]);
} else {
bw = &eht_cap->eht_mcs_nss_supp.bw._80;
ath12k_mac_set_eht_mcs(bw->rx_tx_mcs9_max_nss,
bw->rx_tx_mcs9_max_nss,
bw->rx_tx_mcs11_max_nss,
bw->rx_tx_mcs13_max_nss,
&rx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80],
&tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80]);
}
arg->peer_eht_mcs_count++;
break;
}
arg->punct_bitmap = ~arvif->punct_bitmap;
}
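/* Build the complete WMI peer assoc argument by running each capability
 * parser in turn (basic/crypto/legacy rates, HT, VHT, HE, EHT, QoS,
 * phymode, SMPS). The peer_assoc_done completion is re-armed here so the
 * caller can wait for the firmware's confirmation event.
 */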
static void ath12k_peer_assoc_prepare(struct ath12k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ath12k_wmi_peer_assoc_arg *arg,
bool reassoc)
{
lockdep_assert_held(&ar->conf_mutex);
memset(arg, 0, sizeof(*arg));
reinit_completion(&ar->peer_assoc_done);
arg->peer_new_assoc = !reassoc;
ath12k_peer_assoc_h_basic(ar, vif, sta, arg);
ath12k_peer_assoc_h_crypto(ar, vif, sta, arg);
ath12k_peer_assoc_h_rates(ar, vif, sta, arg);
ath12k_peer_assoc_h_ht(ar, vif, sta, arg);
ath12k_peer_assoc_h_vht(ar, vif, sta, arg);
ath12k_peer_assoc_h_he(ar, vif, sta, arg);
ath12k_peer_assoc_h_eht(ar, vif, sta, arg);
ath12k_peer_assoc_h_qos(ar, vif, sta, arg);
ath12k_peer_assoc_h_phymode(ar, vif, sta, arg);
ath12k_peer_assoc_h_smps(sta, arg);
/* TODO: amsdu_disable req? */
}
static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif,
const u8 *addr,
const struct ieee80211_sta_ht_cap *ht_cap)
{
int smps;
if (!ht_cap->ht_supported)
return 0;
smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
if (smps >= ARRAY_SIZE(ath12k_smps_map))
return -EINVAL;
return ath12k_wmi_set_peer_param(ar, addr, arvif->vdev_id,
WMI_PEER_MIMO_PS_STATE,
ath12k_smps_map[smps]);
}
static void ath12k_bss_assoc(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf)
{
struct ath12k *ar = hw->priv;
struct ath12k_vif *arvif = (void *)vif->drv_priv;
struct ath12k_wmi_peer_assoc_arg peer_arg;
struct ieee80211_sta *ap_sta;
struct ath12k_peer *peer;
bool is_auth = false;
int ret;
lockdep_assert_held(&ar->conf_mutex);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
arvif->vdev_id, arvif->bssid, arvif->aid);
rcu_read_lock();
ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
if (!ap_sta) {
ath12k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n",
bss_conf->bssid, arvif->vdev_id);
rcu_read_unlock();
return;
}
ath12k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg, false);
rcu_read_unlock();
ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (ret) {
ath12k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
bss_conf->bssid, arvif->vdev_id, ret);
return;
}
if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
bss_conf->bssid, arvif->vdev_id);
return;
}
ret = ath12k_setup_peer_smps(ar, arvif, bss_conf->bssid,
&ap_sta->deflink.ht_cap);
if (ret) {
ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
return;
}
WARN_ON(arvif->is_up);
arvif->aid = vif->cfg.aid;
ether_addr_copy(arvif->bssid, bss_conf->bssid);
ret = ath12k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
if (ret) {
ath12k_warn(ar->ab, "failed to set vdev %d up: %d\n",
arvif->vdev_id, ret);
return;
}
arvif->is_up = true;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"mac vdev %d up (associated) bssid %pM aid %d\n",
arvif->vdev_id, bss_conf->bssid, vif->cfg.aid);
spin_lock_bh(&ar->ab->base_lock);
peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arvif->bssid);
if (peer && peer->is_authorized)
is_auth = true;
spin_unlock_bh(&ar->ab->base_lock);
/* Authorize BSS Peer */
if (is_auth) {
ret = ath12k_wmi_set_peer_param(ar, arvif->bssid,
arvif->vdev_id,
WMI_PEER_AUTHORIZE,
1);
if (ret)
ath12k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret);
}
ret = ath12k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
&bss_conf->he_obss_pd);
if (ret)
ath12k_warn(ar->ab, "failed to set vdev %i OBSS PD parameters: %d\n",
arvif->vdev_id, ret);
}
static void ath12k_bss_disassoc(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath12k *ar = hw->priv;
struct ath12k_vif *arvif = (void *)vif->drv_priv;
int ret;
lockdep_assert_held(&ar->conf_mutex);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
arvif->vdev_id, arvif->bssid);
ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id);
if (ret)
ath12k_warn(ar->ab, "failed to down vdev %i: %d\n",
arvif->vdev_id, ret);
arvif->is_up = false;
/* TODO: cancel connection_loss_work */
}
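/* Map a legacy bitrate to the firmware rate code, selecting a CCK or OFDM
 * preamble by rate class. Returns -EINVAL if the bitrate is not in the
 * legacy rate table.
 */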
static u32 ath12k_mac_get_rate_hw_value(int bitrate)
{
u32 preamble;
u16 hw_value;
int rate;
size_t i;
if (ath12k_mac_bitrate_is_cck(bitrate))
preamble = WMI_RATE_PREAMBLE_CCK;
else
preamble = WMI_RATE_PREAMBLE_OFDM;
for (i = 0; i < ARRAY_SIZE(ath12k_legacy_rates); i++) {
if (ath12k_legacy_rates[i].bitrate != bitrate)
continue;
hw_value = ath12k_legacy_rates[i].hw_value;
rate = ATH12K_HW_RATE_CODE(hw_value, 0, preamble);
return rate;
}
return -EINVAL;
}
static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *def)
{
struct ath12k_vif *arvif = (void *)vif->drv_priv;
const struct ieee80211_supported_band *sband;
u8 basic_rate_idx;
int hw_rate_code;
u32 vdev_param;
u16 bitrate;
int ret;
lockdep_assert_held(&ar->conf_mutex);
sband = ar->hw->wiphy->bands[def->chan->band];
basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
bitrate = sband->bitrates[basic_rate_idx].bitrate;
hw_rate_code = ath12k_mac_get_rate_hw_value(bitrate);
if (hw_rate_code < 0) {
ath12k_warn(ar->ab, "bitrate not supported %d\n", bitrate);
return;
}
vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
hw_rate_code);
if (ret)
ath12k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
vdev_param = WMI_VDEV_PARAM_BEACON_RATE;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
hw_rate_code);
if (ret)
ath12k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret);
}
static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
struct ieee80211_bss_conf *info)
{
struct ath12k *ar = arvif->ar;
struct sk_buff *tmpl;
int ret;
u32 interval;
bool unsol_bcast_probe_resp_enabled = false;
if (info->fils_discovery.max_interval) {
interval = info->fils_discovery.max_interval;
tmpl = ieee80211_get_fils_discovery_tmpl(ar->hw, arvif->vif);
if (tmpl)
ret = ath12k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id,
tmpl);
} else if (info->unsol_bcast_probe_resp_interval) {
unsol_bcast_probe_resp_enabled = true;
interval = info->unsol_bcast_probe_resp_interval;
tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(ar->hw,
arvif->vif);
if (tmpl)
ret = ath12k_wmi_probe_resp_tmpl(ar, arvif->vdev_id,
tmpl);
} else { /* Disable */
return ath12k_wmi_fils_discovery(ar, arvif->vdev_id, 0, false);
}
if (!tmpl) {
ath12k_warn(ar->ab,
"mac vdev %i failed to retrieve %s template\n",
arvif->vdev_id, (unsol_bcast_probe_resp_enabled ?
"unsolicited broadcast probe response" :
"FILS discovery"));
return -EPERM;
}
kfree_skb(tmpl);
if (!ret)
ret = ath12k_wmi_fils_discovery(ar, arvif->vdev_id, interval,
unsol_bcast_probe_resp_enabled);
return ret;
}
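/* mac80211 bss_info_changed handler: apply each changed attribute (beacon
 * interval/template, DTIM period, ERP protection/slot/preamble,
 * association state, TX power, multicast and management rates, TWT, OBSS
 * PD, BSS color, FILS discovery, EHT puncturing) to firmware via WMI.
 */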
static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
u64 changed)
{
struct ath12k *ar = hw->priv;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct cfg80211_chan_def def;
u32 param_id, param_value;
enum nl80211_band band;
u32 vdev_param;
int mcast_rate;
u32 preamble;
u16 hw_value;
u16 bitrate;
int ret;
u8 rateidx;
u32 rate;
mutex_lock(&ar->conf_mutex);
if (changed & BSS_CHANGED_BEACON_INT) {
arvif->beacon_interval = info->beacon_int;
param_id = WMI_VDEV_PARAM_BEACON_INTERVAL;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id,
arvif->beacon_interval);
if (ret)
ath12k_warn(ar->ab, "Failed to set beacon interval for VDEV: %d\n",
arvif->vdev_id);
else
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"Beacon interval: %d set for VDEV: %d\n",
arvif->beacon_interval, arvif->vdev_id);
}
if (changed & BSS_CHANGED_BEACON) {
param_id = WMI_PDEV_PARAM_BEACON_TX_MODE;
param_value = WMI_BEACON_STAGGERED_MODE;
ret = ath12k_wmi_pdev_set_param(ar, param_id,
param_value, ar->pdev->pdev_id);
if (ret)
ath12k_warn(ar->ab, "Failed to set beacon mode for VDEV: %d\n",
arvif->vdev_id);
else
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"Set staggered beacon mode for VDEV: %d\n",
arvif->vdev_id);
ret = ath12k_mac_setup_bcn_tmpl(arvif);
if (ret)
ath12k_warn(ar->ab, "failed to update bcn template: %d\n",
ret);
}
if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
arvif->dtim_period = info->dtim_period;
param_id = WMI_VDEV_PARAM_DTIM_PERIOD;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id,
arvif->dtim_period);
if (ret)
ath12k_warn(ar->ab, "Failed to set dtim period for VDEV %d: %i\n",
arvif->vdev_id, ret);
else
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"DTIM period: %d set for VDEV: %d\n",
arvif->dtim_period, arvif->vdev_id);
}
if (changed & BSS_CHANGED_SSID &&
vif->type == NL80211_IFTYPE_AP) {
arvif->u.ap.ssid_len = vif->cfg.ssid_len;
if (vif->cfg.ssid_len)
memcpy(arvif->u.ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len);
arvif->u.ap.hidden_ssid = info->hidden_ssid;
}
if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
ether_addr_copy(arvif->bssid, info->bssid);
if (changed & BSS_CHANGED_BEACON_ENABLED) {
ath12k_control_beaconing(arvif, info);
if (arvif->is_up && vif->bss_conf.he_support &&
vif->bss_conf.he_oper.params) {
/* TODO: Extend to support 1024 BA Bitmap size */
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
WMI_VDEV_PARAM_BA_MODE,
WMI_BA_MODE_BUFFER_SIZE_256);
if (ret)
ath12k_warn(ar->ab,
"failed to set BA BUFFER SIZE 256 for vdev: %d\n",
arvif->vdev_id);
param_id = WMI_VDEV_PARAM_HEOPS_0_31;
param_value = vif->bss_conf.he_oper.params;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, param_value);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"he oper param: %x set for VDEV: %d\n",
param_value, arvif->vdev_id);
if (ret)
ath12k_warn(ar->ab, "Failed to set he oper params %x for VDEV %d: %i\n",
param_value, arvif->vdev_id, ret);
}
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
u32 cts_prot;
cts_prot = !!(info->use_cts_prot);
param_id = WMI_VDEV_PARAM_PROTECTION_MODE;
if (arvif->is_started) {
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, cts_prot);
if (ret)
ath12k_warn(ar->ab, "Failed to set CTS prot for VDEV: %d\n",
arvif->vdev_id);
else
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Set CTS prot: %d for VDEV: %d\n",
cts_prot, arvif->vdev_id);
} else {
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "defer protection mode setup, vdev is not ready yet\n");
}
}
if (changed & BSS_CHANGED_ERP_SLOT) {
u32 slottime;
if (info->use_short_slot)
slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
else
slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
param_id = WMI_VDEV_PARAM_SLOT_TIME;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, slottime);
if (ret)
ath12k_warn(ar->ab, "Failed to set erp slot for VDEV: %d\n",
arvif->vdev_id);
else
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"Set slottime: %d for VDEV: %d\n",
slottime, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
u32 preamble;
if (info->use_short_preamble)
preamble = WMI_VDEV_PREAMBLE_SHORT;
else
preamble = WMI_VDEV_PREAMBLE_LONG;
param_id = WMI_VDEV_PARAM_PREAMBLE;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, preamble);
if (ret)
ath12k_warn(ar->ab, "Failed to set preamble for VDEV: %d\n",
arvif->vdev_id);
else
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"Set preamble: %d for VDEV: %d\n",
preamble, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ASSOC) {
if (vif->cfg.assoc)
ath12k_bss_assoc(hw, vif, info);
else
ath12k_bss_disassoc(hw, vif);
}
if (changed & BSS_CHANGED_TXPOWER) {
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev_id %i txpower %d\n",
arvif->vdev_id, info->txpower);
arvif->txpower = info->txpower;
ath12k_mac_txpower_recalc(ar);
}
if (changed & BSS_CHANGED_MCAST_RATE &&
!ath12k_mac_vif_chan(arvif->vif, &def)) {
band = def.chan->band;
mcast_rate = vif->bss_conf.mcast_rate[band];
if (mcast_rate > 0)
rateidx = mcast_rate - 1;
else
rateidx = ffs(vif->bss_conf.basic_rates) - 1;
if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP)
rateidx += ATH12K_MAC_FIRST_OFDM_RATE_IDX;
bitrate = ath12k_legacy_rates[rateidx].bitrate;
hw_value = ath12k_legacy_rates[rateidx].hw_value;
if (ath12k_mac_bitrate_is_cck(bitrate))
preamble = WMI_RATE_PREAMBLE_CCK;
else
preamble = WMI_RATE_PREAMBLE_OFDM;
rate = ATH12K_HW_RATE_CODE(hw_value, 0, preamble);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"mac vdev %d mcast_rate %x\n",
arvif->vdev_id, rate);
vdev_param = WMI_VDEV_PARAM_MCAST_DATA_RATE;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
vdev_param, rate);
if (ret)
ath12k_warn(ar->ab,
"failed to set mcast rate on vdev %i: %d\n",
arvif->vdev_id, ret);
vdev_param = WMI_VDEV_PARAM_BCAST_DATA_RATE;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
vdev_param, rate);
if (ret)
ath12k_warn(ar->ab,
"failed to set bcast rate on vdev %i: %d\n",
arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_BASIC_RATES &&
!ath12k_mac_vif_chan(arvif->vif, &def))
ath12k_recalculate_mgmt_rate(ar, vif, &def);
if (changed & BSS_CHANGED_TWT) {
if (info->twt_requester || info->twt_responder)
ath12k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id);
else
ath12k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
}
if (changed & BSS_CHANGED_HE_OBSS_PD)
ath12k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
&info->he_obss_pd);
if (changed & BSS_CHANGED_HE_BSS_COLOR) {
if (vif->type == NL80211_IFTYPE_AP) {
ret = ath12k_wmi_obss_color_cfg_cmd(ar,
arvif->vdev_id,
info->he_bss_color.color,
ATH12K_BSS_COLOR_AP_PERIODS,
info->he_bss_color.enabled);
if (ret)
ath12k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
arvif->vdev_id, ret);
} else if (vif->type == NL80211_IFTYPE_STATION) {
ret = ath12k_wmi_send_bss_color_change_enable_cmd(ar,
arvif->vdev_id,
1);
if (ret)
ath12k_warn(ar->ab, "failed to enable bss color change on vdev %i: %d\n",
arvif->vdev_id, ret);
ret = ath12k_wmi_obss_color_cfg_cmd(ar,
arvif->vdev_id,
0,
ATH12K_BSS_COLOR_STA_PERIODS,
1);
if (ret)
ath12k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
arvif->vdev_id, ret);
}
}
if (changed & BSS_CHANGED_FILS_DISCOVERY ||
changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP)
ath12k_mac_fils_discovery(arvif, info);
if (changed & BSS_CHANGED_EHT_PUNCTURING)
arvif->punct_bitmap = info->eht_puncturing;
mutex_unlock(&ar->conf_mutex);
}
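/* Complete the current scan (or remain-on-channel) from any state and
 * reset the state machine to IDLE. Callers must hold data_lock; the
 * wrapper below takes it. Aborted scans are reported to mac80211 with
 * info.aborted set.
 */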
void __ath12k_mac_scan_finish(struct ath12k *ar)
{
lockdep_assert_held(&ar->data_lock);
switch (ar->scan.state) {
case ATH12K_SCAN_IDLE:
break;
case ATH12K_SCAN_RUNNING:
case ATH12K_SCAN_ABORTING:
if (!ar->scan.is_roc) {
struct cfg80211_scan_info info = {
.aborted = (ar->scan.state ==
ATH12K_SCAN_ABORTING),
};
ieee80211_scan_completed(ar->hw, &info);
} else if (ar->scan.roc_notify) {
ieee80211_remain_on_channel_expired(ar->hw);
}
fallthrough;
case ATH12K_SCAN_STARTING:
ar->scan.state = ATH12K_SCAN_IDLE;
ar->scan_channel = NULL;
ar->scan.roc_freq = 0;
cancel_delayed_work(&ar->scan.timeout);
complete(&ar->scan.completed);
break;
}
}
void ath12k_mac_scan_finish(struct ath12k *ar)
{
spin_lock_bh(&ar->data_lock);
__ath12k_mac_scan_finish(ar);
spin_unlock_bh(&ar->data_lock);
}
static int ath12k_scan_stop(struct ath12k *ar)
{
struct ath12k_wmi_scan_cancel_arg arg = {
.req_type = WLAN_SCAN_CANCEL_SINGLE,
.scan_id = ATH12K_SCAN_ID,
};
int ret;
lockdep_assert_held(&ar->conf_mutex);
/* TODO: Fill other STOP Params */
arg.pdev_id = ar->pdev->pdev_id;
ret = ath12k_wmi_send_scan_stop_cmd(ar, &arg);
if (ret) {
ath12k_warn(ar->ab, "failed to stop wmi scan: %d\n", ret);
goto out;
}
ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
if (ret == 0) {
ath12k_warn(ar->ab,
"failed to receive scan abort completion: timed out\n");
ret = -ETIMEDOUT;
} else if (ret > 0) {
ret = 0;
}
out:
/* Scan state should be updated upon scan completion, but in case the
 * firmware fails to deliver the event (for whatever reason) it is
 * desirable to clean up the scan state anyway. The firmware may have
 * simply dropped the scan completion event because the transport pipe
 * overflowed with data, and/or it can recover on its own before the
 * next scan request is submitted.
 */
spin_lock_bh(&ar->data_lock);
if (ar->scan.state != ATH12K_SCAN_IDLE)
__ath12k_mac_scan_finish(ar);
spin_unlock_bh(&ar->data_lock);
return ret;
}
static void ath12k_scan_abort(struct ath12k *ar)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
switch (ar->scan.state) {
case ATH12K_SCAN_IDLE:
/* This can happen if timeout worker kicked in and called
* abortion while scan completion was being processed.
*/
break;
case ATH12K_SCAN_STARTING:
case ATH12K_SCAN_ABORTING:
ath12k_warn(ar->ab, "refusing scan abortion due to invalid scan state: %d\n",
ar->scan.state);
break;
case ATH12K_SCAN_RUNNING:
ar->scan.state = ATH12K_SCAN_ABORTING;
spin_unlock_bh(&ar->data_lock);
ret = ath12k_scan_stop(ar);
if (ret)
ath12k_warn(ar->ab, "failed to abort scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
break;
}
spin_unlock_bh(&ar->data_lock);
}
static void ath12k_scan_timeout_work(struct work_struct *work)
{
struct ath12k *ar = container_of(work, struct ath12k,
scan.timeout.work);
mutex_lock(&ar->conf_mutex);
ath12k_scan_abort(ar);
mutex_unlock(&ar->conf_mutex);
}
static int ath12k_start_scan(struct ath12k *ar,
struct ath12k_wmi_scan_req_arg *arg)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
ret = ath12k_wmi_send_scan_start_cmd(ar, arg);
if (ret)
return ret;
ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
if (ret == 0) {
ret = ath12k_scan_stop(ar);
if (ret)
ath12k_warn(ar->ab, "failed to stop scan: %d\n", ret);
return -ETIMEDOUT;
}
/* If we failed to start the scan, return error code at
* this point. This is probably due to some issue in the
* firmware, but no need to wedge the driver due to that...
*/
spin_lock_bh(&ar->data_lock);
if (ar->scan.state == ATH12K_SCAN_IDLE) {
spin_unlock_bh(&ar->data_lock);
return -EINVAL;
}
spin_unlock_bh(&ar->data_lock);
return 0;
}
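/* mac80211 hw_scan handler: move the scan state machine from IDLE to
 * STARTING, build the WMI scan request (extra IEs, SSIDs, channel list)
 * and arm the timeout worker with a margin over the firmware's own
 * max_scan_time.
 */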
static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
{
struct ath12k *ar = hw->priv;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct cfg80211_scan_request *req = &hw_req->req;
struct ath12k_wmi_scan_req_arg arg = {};
int ret;
int i;
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
switch (ar->scan.state) {
case ATH12K_SCAN_IDLE:
reinit_completion(&ar->scan.started);
reinit_completion(&ar->scan.completed);
ar->scan.state = ATH12K_SCAN_STARTING;
ar->scan.is_roc = false;
ar->scan.vdev_id = arvif->vdev_id;
ret = 0;
break;
case ATH12K_SCAN_STARTING:
case ATH12K_SCAN_RUNNING:
case ATH12K_SCAN_ABORTING:
ret = -EBUSY;
break;
}
spin_unlock_bh(&ar->data_lock);
if (ret)
goto exit;
ath12k_wmi_start_scan_init(ar, &arg);
arg.vdev_id = arvif->vdev_id;
arg.scan_id = ATH12K_SCAN_ID;
if (req->ie_len) {
arg.extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
if (!arg.extraie.ptr) {
ret = -ENOMEM;
goto exit;
}
arg.extraie.len = req->ie_len;
}
if (req->n_ssids) {
arg.num_ssids = req->n_ssids;
for (i = 0; i < arg.num_ssids; i++)
arg.ssid[i] = req->ssids[i];
} else {
arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE;
}
if (req->n_channels) {
arg.num_chan = req->n_channels;
arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
GFP_KERNEL);
if (!arg.chan_list) {
ret = -ENOMEM;
goto exit;
}
for (i = 0; i < arg.num_chan; i++)
arg.chan_list[i] = req->channels[i]->center_freq;
}
ret = ath12k_start_scan(ar, &arg);
if (ret) {
ath12k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
ar->scan.state = ATH12K_SCAN_IDLE;
spin_unlock_bh(&ar->data_lock);
}
/* Add a margin to account for event/command processing */
ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
msecs_to_jiffies(arg.max_scan_time +
ATH12K_MAC_SCAN_TIMEOUT_MSECS));
exit:
kfree(arg.chan_list);
if (req->ie_len)
kfree(arg.extraie.ptr);
mutex_unlock(&ar->conf_mutex);
return ret;
}
static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath12k *ar = hw->priv;
mutex_lock(&ar->conf_mutex);
ath12k_scan_abort(ar);
mutex_unlock(&ar->conf_mutex);
cancel_delayed_work_sync(&ar->scan.timeout);
}
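/* Translate a mac80211 key into a WMI vdev install-key command and wait
 * for the firmware's completion event. DISABLE_KEY is implemented as an
 * install with a zero-length key. When hardware crypto is globally
 * disabled this is a no-op.
 */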
static int ath12k_install_key(struct ath12k_vif *arvif,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd,
const u8 *macaddr, u32 flags)
{
int ret;
struct ath12k *ar = arvif->ar;
struct wmi_vdev_install_key_arg arg = {
.vdev_id = arvif->vdev_id,
.key_idx = key->keyidx,
.key_len = key->keylen,
.key_data = key->key,
.key_flags = flags,
.macaddr = macaddr,
};
lockdep_assert_held(&arvif->ar->conf_mutex);
reinit_completion(&ar->install_key_done);
if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
return 0;
if (cmd == DISABLE_KEY) {
/* TODO: Check if FW expects value other than NONE for del */
/* arg.key_cipher = WMI_CIPHER_NONE; */
arg.key_len = 0;
arg.key_data = NULL;
goto install;
}
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
arg.key_cipher = WMI_CIPHER_AES_CCM;
/* TODO: Re-check if flag is valid */
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
break;
case WLAN_CIPHER_SUITE_TKIP:
arg.key_cipher = WMI_CIPHER_TKIP;
arg.key_txmic_len = 8;
arg.key_rxmic_len = 8;
break;
case WLAN_CIPHER_SUITE_CCMP_256:
arg.key_cipher = WMI_CIPHER_AES_CCM;
break;
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
arg.key_cipher = WMI_CIPHER_AES_GCM;
break;
default:
ath12k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
return -EOPNOTSUPP;
}
if (test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags))
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV |
IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
install:
ret = ath12k_wmi_vdev_install_key(arvif->ar, &arg);
if (ret)
return ret;
if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ))
return -ETIMEDOUT;
if (ether_addr_equal(macaddr, arvif->vif->addr))
arvif->key_cipher = key->cipher;
return ar->install_key_status ? -EINVAL : 0;
}
static int ath12k_clear_peer_keys(struct ath12k_vif *arvif,
const u8 *addr)
{
struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
int first_errno = 0;
int ret;
int i;
u32 flags = 0;
lockdep_assert_held(&ar->conf_mutex);
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find(ab, arvif->vdev_id, addr);
spin_unlock_bh(&ab->base_lock);
if (!peer)
return -ENOENT;
for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
if (!peer->keys[i])
continue;
/* key flags are not required to delete the key */
ret = ath12k_install_key(arvif, peer->keys[i],
DISABLE_KEY, addr, flags);
if (ret < 0 && first_errno == 0)
first_errno = ret;
if (ret < 0)
ath12k_warn(ab, "failed to remove peer key %d: %d\n",
i, ret);
spin_lock_bh(&ab->base_lock);
peer->keys[i] = NULL;
spin_unlock_bh(&ab->base_lock);
}
return first_errno;
}
static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
struct ath12k *ar = hw->priv;
struct ath12k_base *ab = ar->ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_peer *peer;
struct ath12k_sta *arsta;
const u8 *peer_addr;
int ret = 0;
u32 flags = 0;
/* BIP needs to be done in software */
if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
return 1;
if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
return 1;
if (key->keyidx > WMI_MAX_KEY_INDEX)
return -ENOSPC;
mutex_lock(&ar->conf_mutex);
if (sta)
peer_addr = sta->addr;
else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
peer_addr = vif->bss_conf.bssid;
else
peer_addr = vif->addr;
key->hw_key_idx = key->keyidx;
/* The peer should not disappear midway (unless FW goes awry) since
 * we already hold conf_mutex; just make sure it is there now.
 */
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
spin_unlock_bh(&ab->base_lock);
if (!peer) {
if (cmd == SET_KEY) {
ath12k_warn(ab, "cannot install key for non-existent peer %pM\n",
peer_addr);
ret = -EOPNOTSUPP;
goto exit;
} else {
/* if the peer doesn't exist there is no key to disable
* anymore
*/
goto exit;
}
}
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
flags |= WMI_KEY_PAIRWISE;
else
flags |= WMI_KEY_GROUP;
ret = ath12k_install_key(arvif, key, cmd, peer_addr, flags);
if (ret) {
ath12k_warn(ab, "ath12k_install_key failed (%d)\n", ret);
goto exit;
}
ret = ath12k_dp_rx_peer_pn_replay_config(arvif, peer_addr, cmd, key);
if (ret) {
ath12k_warn(ab, "failed to offload PN replay detection %d\n", ret);
goto exit;
}
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
if (peer && cmd == SET_KEY) {
peer->keys[key->keyidx] = key;
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
peer->ucast_keyidx = key->keyidx;
peer->sec_type = ath12k_dp_tx_get_encrypt_type(key->cipher);
} else {
peer->mcast_keyidx = key->keyidx;
peer->sec_type_grp = ath12k_dp_tx_get_encrypt_type(key->cipher);
}
} else if (peer && cmd == DISABLE_KEY) {
peer->keys[key->keyidx] = NULL;
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
peer->ucast_keyidx = 0;
else
peer->mcast_keyidx = 0;
} else if (!peer)
/* impossible unless FW goes crazy */
ath12k_warn(ab, "peer %pM disappeared!\n", peer_addr);
if (sta) {
arsta = (struct ath12k_sta *)sta->drv_priv;
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
if (cmd == SET_KEY)
arsta->pn_type = HAL_PN_TYPE_WPA;
else
arsta->pn_type = HAL_PN_TYPE_NONE;
break;
default:
arsta->pn_type = HAL_PN_TYPE_NONE;
break;
}
}
spin_unlock_bh(&ab->base_lock);
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static int
ath12k_mac_bitrate_mask_num_vht_rates(struct ath12k *ar,
enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask)
{
int num_rates = 0;
int i;
for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
num_rates += hweight16(mask->control[band].vht_mcs[i]);
return num_rates;
}
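/* Program a single VHT rate from the bitrate mask as a per-peer fixed rate
 * via WMI_PEER_PARAM_FIXED_RATE. An NSS entry with exactly one MCS bit set
 * selects the rate; if no such entry exists, -EINVAL is returned.
 */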
static int
ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_vif *arvif,
struct ieee80211_sta *sta,
const struct cfg80211_bitrate_mask *mask,
enum nl80211_band band)
{
struct ath12k *ar = arvif->ar;
u8 vht_rate, nss;
u32 rate_code;
int ret, i;
lockdep_assert_held(&ar->conf_mutex);
nss = 0;
for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
nss = i + 1;
vht_rate = ffs(mask->control[band].vht_mcs[i]) - 1;
}
}
if (!nss) {
ath12k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM",
sta->addr);
return -EINVAL;
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"Setting Fixed VHT Rate for peer %pM. Device will not switch to any other selected rates",
sta->addr);
rate_code = ATH12K_HW_RATE_CODE(vht_rate, nss - 1,
WMI_RATE_PREAMBLE_VHT);
ret = ath12k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id,
WMI_PEER_PARAM_FIXED_RATE,
rate_code);
if (ret)
ath12k_warn(ar->ab,
"failed to update STA %pM Fixed Rate %d: %d\n",
sta->addr, rate_code, ret);
return ret;
}
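/* Run the (re)association sequence for a station: send the peer assoc WMI
 * command and wait for its confirmation event, then apply a fixed VHT rate
 * if the bitrate mask selects exactly one. On initial association this also
 * sets up peer SMPS, RTS/CTS protection accounting for non-WMM stations and
 * U-APSD parameters for WMM stations.
 */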
static int ath12k_station_assoc(struct ath12k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
bool reassoc)
{
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_wmi_peer_assoc_arg peer_arg;
int ret;
struct cfg80211_chan_def def;
enum nl80211_band band;
struct cfg80211_bitrate_mask *mask;
u8 num_vht_rates;
lockdep_assert_held(&ar->conf_mutex);
if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
return -EPERM;
band = def.chan->band;
mask = &arvif->bitrate_mask;
ath12k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc);
ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (ret) {
ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
sta->addr, arvif->vdev_id, ret);
return ret;
}
if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
sta->addr, arvif->vdev_id);
return -ETIMEDOUT;
}
num_vht_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask);
	/* If a single VHT rate is configured (via set_bitrate_mask()),
	 * peer_assoc will disable VHT; it is then re-enabled here through a
	 * peer-specific fixed rate parameter.
	 * Note that all other rates and NSS will be disabled for this peer.
	 */
if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
band);
if (ret)
return ret;
}
/* Re-assoc is run only to update supported rates for given station. It
* doesn't make much sense to reconfigure the peer completely.
*/
if (reassoc)
return 0;
ret = ath12k_setup_peer_smps(ar, arvif, sta->addr,
&sta->deflink.ht_cap);
if (ret) {
ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
return ret;
}
if (!sta->wme) {
arvif->num_legacy_stations++;
ret = ath12k_recalc_rtscts_prot(arvif);
if (ret)
return ret;
}
if (sta->wme && sta->uapsd_queues) {
ret = ath12k_peer_assoc_qos_ap(ar, arvif, sta);
if (ret) {
ath12k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n",
sta->addr, arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
static int ath12k_station_disassoc(struct ath12k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct ath12k_vif *arvif = (void *)vif->drv_priv;
int ret;
lockdep_assert_held(&ar->conf_mutex);
if (!sta->wme) {
arvif->num_legacy_stations--;
ret = ath12k_recalc_rtscts_prot(arvif);
if (ret)
return ret;
}
ret = ath12k_clear_peer_keys(arvif, sta->addr);
if (ret) {
ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
return 0;
}
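/* Deferred worker for rate-control updates signalled by mac80211. The
 * changed fields are snapshotted under data_lock, then bandwidth, NSS, SMPS
 * and rate updates are pushed to firmware under conf_mutex. The ordering of
 * WMI_PEER_PHYMODE vs WMI_PEER_CHWIDTH depends on whether the bandwidth is
 * being upgraded or downgraded (see the comments below).
 */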
static void ath12k_sta_rc_update_wk(struct work_struct *wk)
{
struct ath12k *ar;
struct ath12k_vif *arvif;
struct ath12k_sta *arsta;
struct ieee80211_sta *sta;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
u32 changed, bw, nss, smps, bw_prev;
int err, num_vht_rates;
const struct cfg80211_bitrate_mask *mask;
struct ath12k_wmi_peer_assoc_arg peer_arg;
enum wmi_phy_mode peer_phymode;
arsta = container_of(wk, struct ath12k_sta, update_wk);
sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
arvif = arsta->arvif;
ar = arvif->ar;
if (WARN_ON(ath12k_mac_vif_chan(arvif->vif, &def)))
return;
band = def.chan->band;
ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
spin_lock_bh(&ar->data_lock);
changed = arsta->changed;
arsta->changed = 0;
bw = arsta->bw;
bw_prev = arsta->bw_prev;
nss = arsta->nss;
smps = arsta->smps;
spin_unlock_bh(&ar->data_lock);
mutex_lock(&ar->conf_mutex);
nss = max_t(u32, 1, nss);
nss = min(nss, max(ath12k_mac_max_ht_nss(ht_mcs_mask),
ath12k_mac_max_vht_nss(vht_mcs_mask)));
if (changed & IEEE80211_RC_BW_CHANGED) {
ath12k_peer_assoc_h_phymode(ar, arvif->vif, sta, &peer_arg);
peer_phymode = peer_arg.peer_phymode;
if (bw > bw_prev) {
			/* The phymode advertises the maximum supported
			 * channel width. When upgrading the bandwidth,
			 * firmware sanity checks require WMI_PEER_PHYMODE to
			 * be sent first, followed by WMI_PEER_CHWIDTH.
			 */
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth upgrade for sta %pM new %d old %d\n",
sta->addr, bw, bw_prev);
err = ath12k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id, WMI_PEER_PHYMODE,
peer_phymode);
if (err) {
ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n",
sta->addr, peer_phymode, err);
goto err_rc_bw_changed;
}
err = ath12k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id, WMI_PEER_CHWIDTH,
bw);
if (err)
ath12k_warn(ar->ab, "failed to update STA %pM to peer bandwidth %d: %d\n",
sta->addr, bw, err);
} else {
			/* When downgrading the bandwidth, sending the phymode
			 * first would conflict with the current channel width
			 * and trigger a firmware crash. In this case send
			 * WMI_PEER_CHWIDTH first, followed by WMI_PEER_PHYMODE.
			 */
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth downgrade for sta %pM new %d old %d\n",
sta->addr, bw, bw_prev);
err = ath12k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id, WMI_PEER_CHWIDTH,
bw);
if (err) {
ath12k_warn(ar->ab, "failed to update STA %pM peer to bandwidth %d: %d\n",
sta->addr, bw, err);
goto err_rc_bw_changed;
}
err = ath12k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id, WMI_PEER_PHYMODE,
peer_phymode);
if (err)
ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n",
sta->addr, peer_phymode, err);
}
}
if (changed & IEEE80211_RC_NSS_CHANGED) {
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM nss %d\n",
sta->addr, nss);
err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
WMI_PEER_NSS, nss);
if (err)
ath12k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n",
sta->addr, nss, err);
}
if (changed & IEEE80211_RC_SMPS_CHANGED) {
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM smps %d\n",
sta->addr, smps);
err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
WMI_PEER_MIMO_PS_STATE, smps);
if (err)
ath12k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n",
sta->addr, smps, err);
}
if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
mask = &arvif->bitrate_mask;
num_vht_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band,
mask);
		/* Peer_assoc_prepare will reject VHT rates in the
		 * bitrate_mask if they are not expressible in range format
		 * and will mark the VHT tx rateset as unsupported, so
		 * setting multiple arbitrary VHT MCSes (e.g. MCS 4,5,6) per
		 * peer is not supported here. A single VHT rate in the mask
		 * can, however, be set as a per-peer fixed rate. Note that
		 * even if HT rates are configured in the bitrate mask, the
		 * device will not switch to them while a per-peer fixed
		 * rate is set.
		 * TODO: Check RATEMASK_CMDID to support auto rate selection
		 * across HT/VHT and multiple VHT MCSes.
		 */
if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
ath12k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
band);
} else {
/* If the peer is non-VHT or no fixed VHT rate
* is provided in the new bitrate mask we set the
* other rates using peer_assoc command.
*/
ath12k_peer_assoc_prepare(ar, arvif->vif, sta,
&peer_arg, true);
err = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (err)
ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
sta->addr, arvif->vdev_id, err);
if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ))
ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
sta->addr, arvif->vdev_id);
}
}
err_rc_bw_changed:
mutex_unlock(&ar->conf_mutex);
}
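/* Station accounting against ar->max_num_stations. The peer created on a
 * STA vdev for the AP we associate with is not counted; TDLS peers on a
 * STA vdev are.
 */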
static int ath12k_mac_inc_num_stations(struct ath12k_vif *arvif,
struct ieee80211_sta *sta)
{
struct ath12k *ar = arvif->ar;
lockdep_assert_held(&ar->conf_mutex);
if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
return 0;
if (ar->num_stations >= ar->max_num_stations)
return -ENOBUFS;
ar->num_stations++;
return 0;
}
static void ath12k_mac_dec_num_stations(struct ath12k_vif *arvif,
struct ieee80211_sta *sta)
{
struct ath12k *ar = arvif->ar;
lockdep_assert_held(&ar->conf_mutex);
if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
return;
ar->num_stations--;
}
static int ath12k_mac_station_add(struct ath12k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
struct ath12k_wmi_peer_create_arg peer_param;
int ret;
lockdep_assert_held(&ar->conf_mutex);
ret = ath12k_mac_inc_num_stations(arvif, sta);
if (ret) {
ath12k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
ar->max_num_stations);
goto exit;
}
arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
if (!arsta->rx_stats) {
ret = -ENOMEM;
goto dec_num_station;
}
peer_param.vdev_id = arvif->vdev_id;
peer_param.peer_addr = sta->addr;
peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
ret = ath12k_peer_create(ar, arvif, sta, &peer_param);
if (ret) {
ath12k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
goto free_peer;
}
ath12k_dbg(ab, ATH12K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
if (ieee80211_vif_is_mesh(vif)) {
ret = ath12k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id,
WMI_PEER_USE_4ADDR, 1);
if (ret) {
ath12k_warn(ab, "failed to STA %pM 4addr capability: %d\n",
sta->addr, ret);
goto free_peer;
}
}
ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
if (ret) {
ath12k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
sta->addr, arvif->vdev_id, ret);
goto free_peer;
}
if (ab->hw_params->vdev_start_delay &&
!arvif->is_started &&
arvif->vdev_type != WMI_VDEV_TYPE_AP) {
ret = ath12k_start_vdev_delay(ar->hw, vif);
if (ret) {
ath12k_warn(ab, "failed to delay vdev start: %d\n", ret);
goto free_peer;
}
}
return 0;
free_peer:
ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
dec_num_station:
ath12k_mac_dec_num_stations(arvif, sta);
exit:
return ret;
}
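/* Translate a mac80211 station RX bandwidth value into the corresponding
 * WMI peer channel width, falling back to 20 MHz for unexpected input.
 */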
static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct ath12k *ar,
struct ieee80211_sta *sta)
{
u32 bw = WMI_PEER_CHWIDTH_20MHZ;
switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_20:
bw = WMI_PEER_CHWIDTH_20MHZ;
break;
case IEEE80211_STA_RX_BW_40:
bw = WMI_PEER_CHWIDTH_40MHZ;
break;
case IEEE80211_STA_RX_BW_80:
bw = WMI_PEER_CHWIDTH_80MHZ;
break;
case IEEE80211_STA_RX_BW_160:
bw = WMI_PEER_CHWIDTH_160MHZ;
break;
default:
ath12k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n",
sta->deflink.bandwidth, sta->addr);
bw = WMI_PEER_CHWIDTH_20MHZ;
break;
}
return bw;
}
static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state)
{
struct ath12k *ar = hw->priv;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
struct ath12k_peer *peer;
int ret = 0;
/* cancel must be done outside the mutex to avoid deadlock */
if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST))
cancel_work_sync(&arsta->update_wk);
mutex_lock(&ar->conf_mutex);
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
memset(arsta, 0, sizeof(*arsta));
arsta->arvif = arvif;
INIT_WORK(&arsta->update_wk, ath12k_sta_rc_update_wk);
ret = ath12k_mac_station_add(ar, vif, sta);
if (ret)
ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
ath12k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
ret = ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
else
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
ath12k_mac_dec_num_stations(arvif, sta);
spin_lock_bh(&ar->ab->base_lock);
peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
if (peer && peer->sta == sta) {
ath12k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
vif->addr, arvif->vdev_id);
peer->sta = NULL;
list_del(&peer->list);
kfree(peer);
ar->num_peers--;
}
spin_unlock_bh(&ar->ab->base_lock);
kfree(arsta->rx_stats);
arsta->rx_stats = NULL;
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC &&
(vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
vif->type == NL80211_IFTYPE_ADHOC)) {
ret = ath12k_station_assoc(ar, vif, sta, false);
if (ret)
ath12k_warn(ar->ab, "Failed to associate station: %pM\n",
sta->addr);
spin_lock_bh(&ar->data_lock);
arsta->bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
arsta->bw_prev = sta->deflink.bandwidth;
spin_unlock_bh(&ar->data_lock);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
spin_lock_bh(&ar->ab->base_lock);
peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
if (peer)
peer->is_authorized = true;
spin_unlock_bh(&ar->ab->base_lock);
if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
ret = ath12k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id,
WMI_PEER_AUTHORIZE,
1);
if (ret)
ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
sta->addr, arvif->vdev_id, ret);
}
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
spin_lock_bh(&ar->ab->base_lock);
peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
if (peer)
peer->is_authorized = false;
spin_unlock_bh(&ar->ab->base_lock);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
(vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
vif->type == NL80211_IFTYPE_ADHOC)) {
ret = ath12k_station_disassoc(ar, vif, sta);
if (ret)
ath12k_warn(ar->ab, "Failed to disassociate station: %pM\n",
sta->addr);
}
mutex_unlock(&ar->conf_mutex);
return ret;
}
static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct ath12k *ar = hw->priv;
struct ath12k_vif *arvif = (void *)vif->drv_priv;
int ret;
s16 txpwr;
if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
txpwr = 0;
} else {
txpwr = sta->deflink.txpwr.power;
if (!txpwr)
return -EINVAL;
}
if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
ret = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
WMI_PEER_USE_FIXED_PWR, txpwr);
if (ret) {
ath12k_warn(ar->ab, "failed to set tx power for station ret: %d\n",
ret);
goto out;
}
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
u32 changed)
{
struct ath12k *ar = hw->priv;
struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
struct ath12k_vif *arvif = (void *)vif->drv_priv;
struct ath12k_peer *peer;
u32 bw, smps;
spin_lock_bh(&ar->ab->base_lock);
peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
if (!peer) {
spin_unlock_bh(&ar->ab->base_lock);
ath12k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n",
sta->addr, arvif->vdev_id);
return;
}
spin_unlock_bh(&ar->ab->base_lock);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
sta->addr, changed, sta->deflink.bandwidth, sta->deflink.rx_nss,
sta->deflink.smps_mode);
spin_lock_bh(&ar->data_lock);
if (changed & IEEE80211_RC_BW_CHANGED) {
bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
arsta->bw_prev = arsta->bw;
arsta->bw = bw;
}
if (changed & IEEE80211_RC_NSS_CHANGED)
arsta->nss = sta->deflink.rx_nss;
if (changed & IEEE80211_RC_SMPS_CHANGED) {
smps = WMI_PEER_SMPS_PS_NONE;
switch (sta->deflink.smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_OFF:
smps = WMI_PEER_SMPS_PS_NONE;
break;
case IEEE80211_SMPS_STATIC:
smps = WMI_PEER_SMPS_STATIC;
break;
case IEEE80211_SMPS_DYNAMIC:
smps = WMI_PEER_SMPS_DYNAMIC;
break;
default:
ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n",
sta->deflink.smps_mode, sta->addr);
smps = WMI_PEER_SMPS_PS_NONE;
break;
}
arsta->smps = smps;
}
arsta->changed |= changed;
spin_unlock_bh(&ar->data_lock);
ieee80211_queue_work(hw, &arsta->update_wk);
}
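/* Update the U-APSD delivery/trigger enable bits for one AC on a STA vdev,
 * then adjust the RX wake policy: poll-based U-APSD wake while any AC still
 * has U-APSD enabled, plain wake otherwise.
 */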
static int ath12k_conf_tx_uapsd(struct ath12k *ar, struct ieee80211_vif *vif,
u16 ac, bool enable)
{
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
u32 value;
int ret;
if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
return 0;
switch (ac) {
case IEEE80211_AC_VO:
value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
break;
case IEEE80211_AC_VI:
value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
break;
case IEEE80211_AC_BE:
value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
break;
case IEEE80211_AC_BK:
value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
break;
}
if (enable)
arvif->u.sta.uapsd |= value;
else
arvif->u.sta.uapsd &= ~value;
ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
WMI_STA_PS_PARAM_UAPSD,
arvif->u.sta.uapsd);
if (ret) {
ath12k_warn(ar->ab, "could not set uapsd params %d\n", ret);
goto exit;
}
if (arvif->u.sta.uapsd)
value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
else
value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
WMI_STA_PS_PARAM_RX_WAKE_POLICY,
value);
if (ret)
ath12k_warn(ar->ab, "could not set rx wake param %d\n", ret);
exit:
return ret;
}
static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
unsigned int link_id, u16 ac,
const struct ieee80211_tx_queue_params *params)
{
struct ath12k *ar = hw->priv;
struct ath12k_vif *arvif = (void *)vif->drv_priv;
struct wmi_wmm_params_arg *p = NULL;
int ret;
mutex_lock(&ar->conf_mutex);
switch (ac) {
case IEEE80211_AC_VO:
p = &arvif->wmm_params.ac_vo;
break;
case IEEE80211_AC_VI:
p = &arvif->wmm_params.ac_vi;
break;
case IEEE80211_AC_BE:
p = &arvif->wmm_params.ac_be;
break;
case IEEE80211_AC_BK:
p = &arvif->wmm_params.ac_bk;
break;
}
if (WARN_ON(!p)) {
ret = -EINVAL;
goto exit;
}
p->cwmin = params->cw_min;
p->cwmax = params->cw_max;
p->aifs = params->aifs;
p->txop = params->txop;
ret = ath12k_wmi_send_wmm_update_cmd(ar, arvif->vdev_id,
&arvif->wmm_params);
if (ret) {
ath12k_warn(ar->ab, "failed to set wmm params: %d\n", ret);
goto exit;
}
ret = ath12k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
if (ret)
ath12k_warn(ar->ab, "failed to set sta uapsd: %d\n", ret);
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static struct ieee80211_sta_ht_cap
ath12k_create_ht_cap(struct ath12k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask)
{
int i;
struct ieee80211_sta_ht_cap ht_cap = {0};
u32 ar_vht_cap = ar->pdev->cap.vht_cap;
if (!(ar_ht_cap & WMI_HT_CAP_ENABLED))
return ht_cap;
ht_cap.ht_supported = 1;
ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;
if (ar_ht_cap & WMI_HT_CAP_HT20_SGI)
ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
if (ar_ht_cap & WMI_HT_CAP_HT40_SGI)
ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
if (ar_ht_cap & WMI_HT_CAP_DYNAMIC_SMPS) {
u32 smps;
smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
ht_cap.cap |= smps;
}
if (ar_ht_cap & WMI_HT_CAP_TX_STBC)
ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
if (ar_ht_cap & WMI_HT_CAP_RX_STBC) {
u32 stbc;
stbc = ar_ht_cap;
stbc &= WMI_HT_CAP_RX_STBC;
stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
stbc &= IEEE80211_HT_CAP_RX_STBC;
ht_cap.cap |= stbc;
}
if (ar_ht_cap & WMI_HT_CAP_RX_LDPC)
ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
if (ar_ht_cap & WMI_HT_CAP_L_SIG_TXOP_PROT)
ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
if (ar_vht_cap & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
for (i = 0; i < ar->num_rx_chains; i++) {
if (rate_cap_rx_chainmask & BIT(i))
ht_cap.mcs.rx_mask[i] = 0xFF;
}
ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
return ht_cap;
}
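/* Derive the WMI_VDEV_PARAM_TXBF value from the pdev VHT capabilities:
 * beamformee STS count, sounding dimensions clamped to the available TX
 * chains, and SU/MU beamformer/beamformee enables depending on whether the
 * vdev is an AP or a station.
 */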
static int ath12k_mac_set_txbf_conf(struct ath12k_vif *arvif)
{
u32 value = 0;
struct ath12k *ar = arvif->ar;
int nsts;
int sound_dim;
u32 vht_cap = ar->pdev->cap.vht_cap;
u32 vdev_param = WMI_VDEV_PARAM_TXBF;
if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) {
nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
}
if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
sound_dim = vht_cap &
IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
if (sound_dim > (ar->num_tx_chains - 1))
sound_dim = ar->num_tx_chains - 1;
value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
}
if (!value)
return 0;
if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) {
value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) &&
arvif->vdev_type == WMI_VDEV_TYPE_AP)
value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
}
if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) {
value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
arvif->vdev_type == WMI_VDEV_TYPE_STA)
value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
}
return ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
vdev_param, value);
}
static void ath12k_set_vht_txbf_cap(struct ath12k *ar, u32 *vht_cap)
{
bool subfer, subfee;
int sound_dim = 0;
subfer = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE));
subfee = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
if (ar->num_tx_chains < 2) {
*vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
subfer = false;
}
	/* If SU Beamformer is not set, then disable MU Beamformer Capability */
if (!subfer)
*vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
	/* If SU Beamformee is not set, then disable MU Beamformee Capability */
if (!subfee)
*vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
sound_dim = u32_get_bits(*vht_cap,
IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
*vht_cap = u32_replace_bits(*vht_cap, 0,
IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
/* TODO: Need to check invalid STS and Sound_dim values set by FW? */
/* Enable Sounding Dimension Field only if SU BF is enabled */
if (subfer) {
if (sound_dim > (ar->num_tx_chains - 1))
sound_dim = ar->num_tx_chains - 1;
*vht_cap = u32_replace_bits(*vht_cap, sound_dim,
IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
}
	/* Use the STS advertised by FW only if SU Beamformee is supported */
if (!subfee)
*vht_cap &= ~(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK);
}
static struct ieee80211_sta_vht_cap
ath12k_create_vht_cap(struct ath12k *ar, u32 rate_cap_tx_chainmask,
u32 rate_cap_rx_chainmask)
{
struct ieee80211_sta_vht_cap vht_cap = {0};
u16 txmcs_map, rxmcs_map;
int i;
vht_cap.vht_supported = 1;
vht_cap.cap = ar->pdev->cap.vht_cap;
ath12k_set_vht_txbf_cap(ar, &vht_cap.cap);
/* TODO: Enable back VHT160 mode once association issues are fixed */
/* Disabling VHT160 and VHT80+80 modes */
vht_cap.cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
vht_cap.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160;
rxmcs_map = 0;
txmcs_map = 0;
for (i = 0; i < 8; i++) {
if (i < ar->num_tx_chains && rate_cap_tx_chainmask & BIT(i))
txmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
else
txmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
if (i < ar->num_rx_chains && rate_cap_rx_chainmask & BIT(i))
rxmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
else
rxmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
}
if (rate_cap_tx_chainmask <= 1)
vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_map);
vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_map);
return vht_cap;
}
static void ath12k_mac_setup_ht_vht_cap(struct ath12k *ar,
struct ath12k_pdev_cap *cap,
u32 *ht_cap_info)
{
struct ieee80211_supported_band *band;
u32 rate_cap_tx_chainmask;
u32 rate_cap_rx_chainmask;
u32 ht_cap;
rate_cap_tx_chainmask = ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift;
rate_cap_rx_chainmask = ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift;
if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
band = &ar->mac.sbands[NL80211_BAND_2GHZ];
ht_cap = cap->band[NL80211_BAND_2GHZ].ht_cap_info;
if (ht_cap_info)
*ht_cap_info = ht_cap;
band->ht_cap = ath12k_create_ht_cap(ar, ht_cap,
rate_cap_rx_chainmask);
}
if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
(ar->ab->hw_params->single_pdev_only ||
!ar->supports_6ghz)) {
band = &ar->mac.sbands[NL80211_BAND_5GHZ];
ht_cap = cap->band[NL80211_BAND_5GHZ].ht_cap_info;
if (ht_cap_info)
*ht_cap_info = ht_cap;
band->ht_cap = ath12k_create_ht_cap(ar, ht_cap,
rate_cap_rx_chainmask);
band->vht_cap = ath12k_create_vht_cap(ar, rate_cap_tx_chainmask,
rate_cap_rx_chainmask);
}
}
static int ath12k_check_chain_mask(struct ath12k *ar, u32 ant, bool is_tx_ant)
{
	/* TODO: Check the requested chainmask against the supported
	 * chainmask table which is advertised in the extended service ready
	 * event.
	 */
return 0;
}
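/* Pack the firmware PPE threshold report into the 802.11ax PPE Thresholds
 * field format: the NSS count and RU index bitmask form the header, then
 * for each active NSS/RU pair the 6-bit threshold is written with its
 * 3-bit PPET16/PPET8 halves swapped, bit-packed starting at bit 7.
 */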
static void ath12k_gen_ppe_thresh(struct ath12k_wmi_ppe_threshold_arg *fw_ppet,
u8 *he_ppet)
{
int nss, ru;
u8 bit = 7;
he_ppet[0] = fw_ppet->numss_m1 & IEEE80211_PPE_THRES_NSS_MASK;
he_ppet[0] |= (fw_ppet->ru_bit_mask <<
IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS) &
IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK;
for (nss = 0; nss <= fw_ppet->numss_m1; nss++) {
for (ru = 0; ru < 4; ru++) {
u8 val;
int i;
if ((fw_ppet->ru_bit_mask & BIT(ru)) == 0)
continue;
val = (fw_ppet->ppet16_ppet8_ru3_ru0[nss] >> (ru * 6)) &
0x3f;
val = ((val >> 3) & 0x7) | ((val & 0x7) << 3);
for (i = 5; i >= 0; i--) {
he_ppet[bit / 8] |=
((val >> i) & 0x1) << ((bit % 8));
bit++;
}
}
}
}
static void
ath12k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem)
{
u8 m;
m = IEEE80211_HE_MAC_CAP0_TWT_RES |
IEEE80211_HE_MAC_CAP0_TWT_REQ;
he_cap_elem->mac_cap_info[0] &= ~m;
m = IEEE80211_HE_MAC_CAP2_TRS |
IEEE80211_HE_MAC_CAP2_BCAST_TWT |
IEEE80211_HE_MAC_CAP2_MU_CASCADING;
he_cap_elem->mac_cap_info[2] &= ~m;
m = IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED |
IEEE80211_HE_MAC_CAP2_BCAST_TWT |
IEEE80211_HE_MAC_CAP2_MU_CASCADING;
he_cap_elem->mac_cap_info[3] &= ~m;
m = IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG |
IEEE80211_HE_MAC_CAP4_BQR;
he_cap_elem->mac_cap_info[4] &= ~m;
m = IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION |
IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU |
IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING |
IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
he_cap_elem->mac_cap_info[5] &= ~m;
m = IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
he_cap_elem->phy_cap_info[2] &= ~m;
m = IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU |
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK;
he_cap_elem->phy_cap_info[3] &= ~m;
m = IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
he_cap_elem->phy_cap_info[4] &= ~m;
m = IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
he_cap_elem->phy_cap_info[5] &= ~m;
m = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB |
IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO;
he_cap_elem->phy_cap_info[6] &= ~m;
m = IEEE80211_HE_PHY_CAP7_PSR_BASED_SR |
IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
he_cap_elem->phy_cap_info[7] &= ~m;
m = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
he_cap_elem->phy_cap_info[8] &= ~m;
m = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
he_cap_elem->phy_cap_info[9] &= ~m;
}
static __le16 ath12k_mac_setup_he_6ghz_cap(struct ath12k_pdev_cap *pcap,
struct ath12k_band_cap *bcap)
{
u8 val;
bcap->he_6ghz_capa = IEEE80211_HT_MPDU_DENSITY_NONE;
if (bcap->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
bcap->he_6ghz_capa |=
u32_encode_bits(WLAN_HT_CAP_SM_PS_DYNAMIC,
IEEE80211_HE_6GHZ_CAP_SM_PS);
else
bcap->he_6ghz_capa |=
u32_encode_bits(WLAN_HT_CAP_SM_PS_DISABLED,
IEEE80211_HE_6GHZ_CAP_SM_PS);
val = u32_get_bits(pcap->vht_cap,
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
bcap->he_6ghz_capa |=
u32_encode_bits(val, IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
val = u32_get_bits(pcap->vht_cap,
IEEE80211_VHT_CAP_MAX_MPDU_MASK);
bcap->he_6ghz_capa |=
u32_encode_bits(val, IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
if (pcap->vht_cap & IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN)
bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
if (pcap->vht_cap & IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN)
bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS;
return cpu_to_le16(bcap->he_6ghz_capa);
}
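/* Build the per-iftype HE capability from the firmware-reported band caps,
 * masking out fields the driver does not handle and applying interface
 * specific fixups (TWT roles and 1024-QAM RU support for AP vs. station,
 * plus the mesh filtering above).
 */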
static void ath12k_mac_copy_he_cap(struct ath12k_band_cap *band_cap,
int iftype, u8 num_tx_chains,
struct ieee80211_sta_he_cap *he_cap)
{
struct ieee80211_he_cap_elem *he_cap_elem = &he_cap->he_cap_elem;
struct ieee80211_he_mcs_nss_supp *mcs_nss = &he_cap->he_mcs_nss_supp;
he_cap->has_he = true;
memcpy(he_cap_elem->mac_cap_info, band_cap->he_cap_info,
sizeof(he_cap_elem->mac_cap_info));
memcpy(he_cap_elem->phy_cap_info, band_cap->he_cap_phy_info,
sizeof(he_cap_elem->phy_cap_info));
he_cap_elem->mac_cap_info[1] &=
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK;
he_cap_elem->phy_cap_info[5] &=
~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK;
he_cap_elem->phy_cap_info[5] &=
~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
he_cap_elem->phy_cap_info[5] |= num_tx_chains - 1;
switch (iftype) {
case NL80211_IFTYPE_AP:
he_cap_elem->phy_cap_info[3] &=
~IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK;
he_cap_elem->phy_cap_info[9] |=
IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
break;
case NL80211_IFTYPE_STATION:
he_cap_elem->mac_cap_info[0] &= ~IEEE80211_HE_MAC_CAP0_TWT_RES;
he_cap_elem->mac_cap_info[0] |= IEEE80211_HE_MAC_CAP0_TWT_REQ;
he_cap_elem->phy_cap_info[9] |=
IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
break;
case NL80211_IFTYPE_MESH_POINT:
ath12k_mac_filter_he_cap_mesh(he_cap_elem);
break;
}
mcs_nss->rx_mcs_80 = cpu_to_le16(band_cap->he_mcs & 0xffff);
mcs_nss->tx_mcs_80 = cpu_to_le16(band_cap->he_mcs & 0xffff);
mcs_nss->rx_mcs_160 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
mcs_nss->tx_mcs_160 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
mcs_nss->rx_mcs_80p80 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
mcs_nss->tx_mcs_80p80 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
if (he_cap_elem->phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)
ath12k_gen_ppe_thresh(&band_cap->he_ppet, he_cap->ppe_thres);
}
static void
ath12k_mac_copy_eht_mcs_nss(struct ath12k_band_cap *band_cap,
struct ieee80211_eht_mcs_nss_supp *mcs_nss,
const struct ieee80211_he_cap_elem *he_cap,
const struct ieee80211_eht_cap_elem_fixed *eht_cap)
{
if ((he_cap->phy_cap_info[0] &
(IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)) == 0)
memcpy(&mcs_nss->only_20mhz, &band_cap->eht_mcs_20_only,
sizeof(struct ieee80211_eht_mcs_nss_supp_20mhz_only));
if (he_cap->phy_cap_info[0] &
(IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G))
memcpy(&mcs_nss->bw._80, &band_cap->eht_mcs_80,
sizeof(struct ieee80211_eht_mcs_nss_supp_bw));
if (he_cap->phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
memcpy(&mcs_nss->bw._160, &band_cap->eht_mcs_160,
sizeof(struct ieee80211_eht_mcs_nss_supp_bw));
if (eht_cap->phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
memcpy(&mcs_nss->bw._320, &band_cap->eht_mcs_320,
sizeof(struct ieee80211_eht_mcs_nss_supp_bw));
}
static void ath12k_mac_copy_eht_ppe_thresh(struct ath12k_wmi_ppe_threshold_arg *fw_ppet,
struct ieee80211_sta_eht_cap *cap)
{
u16 bit = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE;
u8 i, nss, ru, ppet_bit_len_per_ru = IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2;
u8p_replace_bits(&cap->eht_ppe_thres[0], fw_ppet->numss_m1,
IEEE80211_EHT_PPE_THRES_NSS_MASK);
u16p_replace_bits((u16 *)&cap->eht_ppe_thres[0], fw_ppet->ru_bit_mask,
IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
for (nss = 0; nss <= fw_ppet->numss_m1; nss++) {
for (ru = 0;
ru < hweight16(IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
ru++) {
u32 val = 0;
if ((fw_ppet->ru_bit_mask & BIT(ru)) == 0)
continue;
u32p_replace_bits(&val, fw_ppet->ppet16_ppet8_ru3_ru0[nss] >>
(ru * ppet_bit_len_per_ru),
GENMASK(ppet_bit_len_per_ru - 1, 0));
for (i = 0; i < ppet_bit_len_per_ru; i++) {
cap->eht_ppe_thres[bit / 8] |=
(((val >> i) & 0x1) << ((bit % 8)));
bit++;
}
}
}
}
static void ath12k_mac_copy_eht_cap(struct ath12k_band_cap *band_cap,
struct ieee80211_he_cap_elem *he_cap_elem,
int iftype,
struct ieee80211_sta_eht_cap *eht_cap)
{
struct ieee80211_eht_cap_elem_fixed *eht_cap_elem = &eht_cap->eht_cap_elem;
memset(eht_cap, 0, sizeof(struct ieee80211_sta_eht_cap));
eht_cap->has_eht = true;
memcpy(eht_cap_elem->mac_cap_info, band_cap->eht_cap_mac_info,
sizeof(eht_cap_elem->mac_cap_info));
memcpy(eht_cap_elem->phy_cap_info, band_cap->eht_cap_phy_info,
sizeof(eht_cap_elem->phy_cap_info));
switch (iftype) {
case NL80211_IFTYPE_AP:
eht_cap_elem->phy_cap_info[0] &=
~IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ;
eht_cap_elem->phy_cap_info[4] &=
~IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO;
eht_cap_elem->phy_cap_info[5] &=
~IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP;
break;
case NL80211_IFTYPE_STATION:
eht_cap_elem->phy_cap_info[7] &=
~(IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ);
eht_cap_elem->phy_cap_info[7] &=
~(IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ |
IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ);
break;
default:
break;
}
ath12k_mac_copy_eht_mcs_nss(band_cap, &eht_cap->eht_mcs_nss_supp,
he_cap_elem, eht_cap_elem);
if (eht_cap_elem->phy_cap_info[5] &
IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT)
ath12k_mac_copy_eht_ppe_thresh(&band_cap->eht_ppet, eht_cap);
}
static int ath12k_mac_copy_sband_iftype_data(struct ath12k *ar,
struct ath12k_pdev_cap *cap,
struct ieee80211_sband_iftype_data *data,
int band)
{
struct ath12k_band_cap *band_cap = &cap->band[band];
int i, idx = 0;
for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap;
switch (i) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
break;
default:
continue;
}
data[idx].types_mask = BIT(i);
ath12k_mac_copy_he_cap(band_cap, i, ar->num_tx_chains, he_cap);
if (band == NL80211_BAND_6GHZ) {
data[idx].he_6ghz_capa.capa =
ath12k_mac_setup_he_6ghz_cap(cap, band_cap);
}
ath12k_mac_copy_eht_cap(band_cap, &he_cap->he_cap_elem, i,
&data[idx].eht_cap);
idx++;
}
return idx;
}
static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar,
struct ath12k_pdev_cap *cap)
{
struct ieee80211_supported_band *sband;
enum nl80211_band band;
int count;
if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
band = NL80211_BAND_2GHZ;
count = ath12k_mac_copy_sband_iftype_data(ar, cap,
ar->mac.iftype[band],
band);
sband = &ar->mac.sbands[band];
sband->iftype_data = ar->mac.iftype[band];
sband->n_iftype_data = count;
}
if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) {
band = NL80211_BAND_5GHZ;
count = ath12k_mac_copy_sband_iftype_data(ar, cap,
ar->mac.iftype[band],
band);
sband = &ar->mac.sbands[band];
sband->iftype_data = ar->mac.iftype[band];
sband->n_iftype_data = count;
}
if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
ar->supports_6ghz) {
band = NL80211_BAND_6GHZ;
count = ath12k_mac_copy_sband_iftype_data(ar, cap,
ar->mac.iftype[band],
band);
sband = &ar->mac.sbands[band];
sband->iftype_data = ar->mac.iftype[band];
sband->n_iftype_data = count;
}
}
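/* Apply new TX/RX chainmasks. The cached masks are always updated; the WMI
 * pdev parameters are only pushed while the radio is up, after which the
 * HT/VHT/HE capabilities are rebuilt to match the new chain counts.
 */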
static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
if (ath12k_check_chain_mask(ar, tx_ant, true))
return -EINVAL;
if (ath12k_check_chain_mask(ar, rx_ant, false))
return -EINVAL;
ar->cfg_tx_chainmask = tx_ant;
ar->cfg_rx_chainmask = rx_ant;
if (ar->state != ATH12K_STATE_ON &&
ar->state != ATH12K_STATE_RESTARTED)
return 0;
ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK,
tx_ant, ar->pdev->pdev_id);
if (ret) {
ath12k_warn(ar->ab, "failed to set tx-chainmask: %d, req 0x%x\n",
ret, tx_ant);
return ret;
}
ar->num_tx_chains = hweight32(tx_ant);
ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RX_CHAIN_MASK,
rx_ant, ar->pdev->pdev_id);
if (ret) {
ath12k_warn(ar->ab, "failed to set rx-chainmask: %d, req 0x%x\n",
ret, rx_ant);
return ret;
}
ar->num_rx_chains = hweight32(rx_ant);
/* Reload HT/VHT/HE capability */
ath12k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL);
ath12k_mac_setup_sband_iftype_data(ar, &ar->pdev->cap);
return 0;
}
static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb)
{
int num_mgmt;
ieee80211_free_txskb(ar->hw, skb);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
	WARN_ON_ONCE(num_mgmt < 0);
if (!num_mgmt)
wake_up(&ar->txmgmt_empty_waitq);
}
int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
{
struct sk_buff *msdu = skb;
struct ieee80211_tx_info *info;
struct ath12k *ar = ctx;
struct ath12k_base *ab = ar->ab;
spin_lock_bh(&ar->txmgmt_idr_lock);
idr_remove(&ar->txmgmt_idr, buf_id);
spin_unlock_bh(&ar->txmgmt_idr_lock);
dma_unmap_single(ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len,
DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
ath12k_mgmt_over_wmi_tx_drop(ar, skb);
return 0;
}
static int ath12k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx)
{
struct ieee80211_vif *vif = ctx;
struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
struct sk_buff *msdu = skb;
struct ath12k *ar = skb_cb->ar;
struct ath12k_base *ab = ar->ab;
if (skb_cb->vif == vif) {
spin_lock_bh(&ar->txmgmt_idr_lock);
idr_remove(&ar->txmgmt_idr, buf_id);
spin_unlock_bh(&ar->txmgmt_idr_lock);
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len,
DMA_TO_DEVICE);
}
return 0;
}
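/* DMA-map one management frame, track it in the txmgmt IDR and hand it to
 * firmware over WMI. For protected action, deauth and disassoc frames
 * (without HW 802.11 encapsulation) tail room for the CCMP MIC is reserved
 * before mapping.
 */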
static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_vif *arvif,
struct sk_buff *skb)
{
struct ath12k_base *ab = ar->ab;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info;
dma_addr_t paddr;
int buf_id;
int ret;
ATH12K_SKB_CB(skb)->ar = ar;
spin_lock_bh(&ar->txmgmt_idr_lock);
buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
ATH12K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC);
spin_unlock_bh(&ar->txmgmt_idr_lock);
if (buf_id < 0)
return -ENOSPC;
info = IEEE80211_SKB_CB(skb);
if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
ieee80211_has_protected(hdr->frame_control)) {
skb_put(skb, IEEE80211_CCMP_MIC_LEN);
}
}
paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, paddr)) {
ath12k_warn(ab, "failed to DMA map mgmt Tx buffer\n");
ret = -EIO;
goto err_free_idr;
}
ATH12K_SKB_CB(skb)->paddr = paddr;
ret = ath12k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb);
if (ret) {
ath12k_warn(ar->ab, "failed to send mgmt frame: %d\n", ret);
goto err_unmap_buf;
}
return 0;
err_unmap_buf:
dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
skb->len, DMA_TO_DEVICE);
err_free_idr:
spin_lock_bh(&ar->txmgmt_idr_lock);
idr_remove(&ar->txmgmt_idr, buf_id);
spin_unlock_bh(&ar->txmgmt_idr_lock);
return ret;
}
static void ath12k_mgmt_over_wmi_tx_purge(struct ath12k *ar)
{
struct sk_buff *skb;
while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL)
ath12k_mgmt_over_wmi_tx_drop(ar, skb);
}
static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
{
struct ath12k *ar = container_of(work, struct ath12k, wmi_mgmt_tx_work);
struct ath12k_skb_cb *skb_cb;
struct ath12k_vif *arvif;
struct sk_buff *skb;
int ret;
while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
skb_cb = ATH12K_SKB_CB(skb);
if (!skb_cb->vif) {
ath12k_warn(ar->ab, "no vif found for mgmt frame\n");
ath12k_mgmt_over_wmi_tx_drop(ar, skb);
continue;
}
arvif = ath12k_vif_to_arvif(skb_cb->vif);
if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) &&
arvif->is_started) {
ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb);
if (ret) {
ath12k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
arvif->vdev_id, ret);
ath12k_mgmt_over_wmi_tx_drop(ar, skb);
}
} else {
ath12k_warn(ar->ab,
"dropping mgmt frame for vdev %d, is_started %d\n",
arvif->vdev_id,
arvif->is_started);
ath12k_mgmt_over_wmi_tx_drop(ar, skb);
}
}
}
static int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb,
bool is_prb_rsp)
{
struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
/* Drop probe response packets when the pending management tx
* count has reached a certain threshold, so as to prioritize
* other mgmt packets like auth and assoc to be sent on time
* for establishing successful connections.
*/
if (is_prb_rsp &&
atomic_read(&ar->num_pending_mgmt_tx) > ATH12K_PRB_RSP_DROP_THRESHOLD) {
ath12k_warn(ar->ab,
"dropping probe response as pending queue is almost full\n");
return -ENOSPC;
}
if (skb_queue_len_lockless(q) >= ATH12K_TX_MGMT_NUM_PENDING_MAX) {
ath12k_warn(ar->ab, "mgmt tx queue is full\n");
return -ENOSPC;
}
skb_queue_tail(q, skb);
atomic_inc(&ar->num_pending_mgmt_tx);
ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
return 0;
}
static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
struct ath12k *ar = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key_conf *key = info->control.hw_key;
u32 info_flags = info->flags;
bool is_prb_rsp;
int ret;
memset(skb_cb, 0, sizeof(*skb_cb));
skb_cb->vif = vif;
if (key) {
skb_cb->cipher = key->cipher;
skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
}
if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP;
} else if (ieee80211_is_mgmt(hdr->frame_control)) {
is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp);
if (ret) {
ath12k_warn(ar->ab, "failed to queue management frame %d\n",
ret);
ieee80211_free_txskb(ar->hw, skb);
}
return;
}
ret = ath12k_dp_tx(ar, arvif, skb);
if (ret) {
ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
ieee80211_free_txskb(ar->hw, skb);
}
}
void ath12k_mac_drain_tx(struct ath12k *ar)
{
/* make sure rcu-protected mac80211 tx path itself is drained */
synchronize_net();
cancel_work_sync(&ar->wmi_mgmt_tx_work);
ath12k_mgmt_over_wmi_tx_purge(ar);
}
static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable)
{
	/* TODO: Need to support the new monitor mode */
	return -ENOTSUPP;
}
static void ath12k_mac_wait_reconfigure(struct ath12k_base *ab)
{
int recovery_start_count;
if (!ab->is_reset)
return;
recovery_start_count = atomic_inc_return(&ab->recovery_start_count);
ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery start count %d\n", recovery_start_count);
if (recovery_start_count == ab->num_radios) {
complete(&ab->recovery_start);
ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery started success\n");
}
ath12k_dbg(ab, ATH12K_DBG_MAC, "waiting reconfigure...\n");
wait_for_completion_timeout(&ab->reconfigure_complete,
ATH12K_RECONFIGURE_TIMEOUT_HZ);
}
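/* mac80211 start callback: move the radio to the ON (or, during recovery,
 * RESTARTED) state and program the baseline pdev parameters (PMF QoS,
 * dynamic bandwidth, ARP AC override, DFS phyerr offload, PPDU stats,
 * mesh multicast and, where supported, idle power save) before marking
 * the pdev active.
 */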
static int ath12k_mac_op_start(struct ieee80211_hw *hw)
{
struct ath12k *ar = hw->priv;
struct ath12k_base *ab = ar->ab;
struct ath12k_pdev *pdev = ar->pdev;
int ret;
ath12k_mac_drain_tx(ar);
mutex_lock(&ar->conf_mutex);
switch (ar->state) {
case ATH12K_STATE_OFF:
ar->state = ATH12K_STATE_ON;
break;
case ATH12K_STATE_RESTARTING:
ar->state = ATH12K_STATE_RESTARTED;
ath12k_mac_wait_reconfigure(ab);
break;
case ATH12K_STATE_RESTARTED:
case ATH12K_STATE_WEDGED:
case ATH12K_STATE_ON:
WARN_ON(1);
ret = -EINVAL;
goto err;
}
ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS,
1, pdev->pdev_id);
if (ret) {
ath12k_err(ar->ab, "failed to enable PMF QOS: (%d\n", ret);
goto err;
}
ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
pdev->pdev_id);
if (ret) {
ath12k_err(ar->ab, "failed to enable dynamic bw: %d\n", ret);
goto err;
}
ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
0, pdev->pdev_id);
if (ret) {
ath12k_err(ab, "failed to set ac override for ARP: %d\n",
ret);
goto err;
}
ret = ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(ar, pdev->pdev_id);
if (ret) {
ath12k_err(ab, "failed to offload radar detection: %d\n",
ret);
goto err;
}
ret = ath12k_dp_tx_htt_h2t_ppdu_stats_req(ar,
HTT_PPDU_STATS_TAG_DEFAULT);
if (ret) {
ath12k_err(ab, "failed to req ppdu stats: %d\n", ret);
goto err;
}
ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
1, pdev->pdev_id);
if (ret) {
ath12k_err(ar->ab, "failed to enable MESH MCAST ENABLE: (%d\n", ret);
goto err;
}
__ath12k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
/* TODO: Do we need to enable ANI? */
ath12k_reg_update_chan_list(ar);
ar->num_started_vdevs = 0;
ar->num_created_vdevs = 0;
ar->num_peers = 0;
ar->allocated_vdev_map = 0;
/* Configure monitor status ring with default rx_filter to get rx status
* such as rssi, rx_duration.
*/
ret = ath12k_mac_config_mon_status_default(ar, true);
if (ret && (ret != -ENOTSUPP)) {
ath12k_err(ab, "failed to configure monitor status ring with default rx_filter: (%d)\n",
ret);
goto err;
}
if (ret == -ENOTSUPP)
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"monitor status config is not yet supported");
/* Configure the hash seed for hash based reo dest ring selection */
ath12k_wmi_pdev_lro_cfg(ar, ar->pdev->pdev_id);
/* allow device to enter IMPS */
if (ab->hw_params->idle_ps) {
ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_IDLE_PS_CONFIG,
1, pdev->pdev_id);
if (ret) {
ath12k_err(ab, "failed to enable idle ps: %d\n", ret);
goto err;
}
}
mutex_unlock(&ar->conf_mutex);
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
&ab->pdevs[ar->pdev_idx]);
return 0;
err:
ar->state = ATH12K_STATE_OFF;
mutex_unlock(&ar->conf_mutex);
return ret;
}
static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
{
struct ath12k *ar = hw->priv;
struct htt_ppdu_stats_info *ppdu_stats, *tmp;
int ret;
ath12k_mac_drain_tx(ar);
mutex_lock(&ar->conf_mutex);
ret = ath12k_mac_config_mon_status_default(ar, false);
if (ret && (ret != -ENOTSUPP))
ath12k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n",
ret);
clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
ar->state = ATH12K_STATE_OFF;
mutex_unlock(&ar->conf_mutex);
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->regd_update_work);
spin_lock_bh(&ar->data_lock);
list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
list_del(&ppdu_stats->list);
kfree(ppdu_stats);
}
spin_unlock_bh(&ar->data_lock);
rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL);
synchronize_rcu();
atomic_set(&ar->num_pending_mgmt_tx, 0);
}
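/* Allocate the lowest free vdev stats id from the 64-bit bitmap and mark it
 * used; if no id below the ATH12K_INVAL_VDEV_STATS_ID sentinel is free, the
 * invalid id is returned instead. For example, with free_vdev_stats_id_map
 * == 0b0111 the ids 0..2 are in use, so this returns 3 and sets bit 3.
 */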
static u8
ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif)
{
struct ath12k_base *ab = arvif->ar->ab;
u8 vdev_stats_id = 0;
do {
if (ab->free_vdev_stats_id_map & (1LL << vdev_stats_id)) {
vdev_stats_id++;
			if (vdev_stats_id >= ATH12K_INVAL_VDEV_STATS_ID) {
vdev_stats_id = ATH12K_INVAL_VDEV_STATS_ID;
break;
}
} else {
ab->free_vdev_stats_id_map |= (1LL << vdev_stats_id);
break;
}
} while (vdev_stats_id);
arvif->vdev_stats_id = vdev_stats_id;
return vdev_stats_id;
}
static void ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif,
struct ath12k_wmi_vdev_create_arg *arg)
{
struct ath12k *ar = arvif->ar;
struct ath12k_pdev *pdev = ar->pdev;
arg->if_id = arvif->vdev_id;
arg->type = arvif->vdev_type;
arg->subtype = arvif->vdev_subtype;
arg->pdev_id = pdev->pdev_id;
if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
arg->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
arg->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
}
if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
arg->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
arg->chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
}
if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP &&
ar->supports_6ghz) {
arg->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains;
arg->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains;
}
arg->if_stats_id = ath12k_mac_get_vdev_stats_id(arvif);
}
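/* Build the WMI_VDEV_PARAM_SET_HEMU_MODE bitmap from the pdev HE PHY
 * capabilities: SU beamformee/beamformer and UL MU-MIMO for every vdev
 * type, plus MU beamformer and DL/UL MU-OFDMA for AP vdevs, or MU
 * beamformee for the others.
 */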
static u32
ath12k_mac_prepare_he_mode(struct ath12k_pdev *pdev, u32 viftype)
{
struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
struct ath12k_band_cap *cap_band = NULL;
u32 *hecap_phy_ptr = NULL;
u32 hemode;
if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP)
cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
else
cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
hecap_phy_ptr = &cap_band->he_cap_phy_info[0];
hemode = u32_encode_bits(HE_SU_BFEE_ENABLE, HE_MODE_SU_TX_BFEE) |
u32_encode_bits(HECAP_PHY_SUBFMR_GET(hecap_phy_ptr),
HE_MODE_SU_TX_BFER) |
u32_encode_bits(HECAP_PHY_ULMUMIMO_GET(hecap_phy_ptr),
HE_MODE_UL_MUMIMO);
/* TODO: WDS and other modes */
if (viftype == NL80211_IFTYPE_AP) {
hemode |= u32_encode_bits(HECAP_PHY_MUBFMR_GET(hecap_phy_ptr),
HE_MODE_MU_TX_BFER) |
u32_encode_bits(HE_DL_MUOFDMA_ENABLE, HE_MODE_DL_OFDMA) |
u32_encode_bits(HE_UL_MUOFDMA_ENABLE, HE_MODE_UL_OFDMA);
} else {
hemode |= u32_encode_bits(HE_MU_BFEE_ENABLE, HE_MODE_MU_TX_BFEE);
}
return hemode;
}
static int ath12k_set_he_mu_sounding_mode(struct ath12k *ar,
struct ath12k_vif *arvif)
{
u32 param_id, param_value;
struct ath12k_base *ab = ar->ab;
int ret;
param_id = WMI_VDEV_PARAM_SET_HEMU_MODE;
param_value = ath12k_mac_prepare_he_mode(ar->pdev, arvif->vif->type);
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, param_value);
if (ret) {
ath12k_warn(ab, "failed to set vdev %d HE MU mode: %d param_value %x\n",
arvif->vdev_id, ret, param_value);
return ret;
}
param_id = WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE;
param_value =
u32_encode_bits(HE_VHT_SOUNDING_MODE_ENABLE, HE_VHT_SOUNDING_MODE) |
u32_encode_bits(HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE,
HE_TRIG_NONTRIG_SOUNDING_MODE);
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, param_value);
if (ret) {
ath12k_warn(ab, "failed to set vdev %d HE MU mode: %d\n",
arvif->vdev_id, ret);
return ret;
}
return ret;
}
static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath12k *ar = hw->priv;
struct ath12k_base *ab = ar->ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
u32 param_id, param_value;
int ret;
param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
if (vif->type != NL80211_IFTYPE_STATION &&
vif->type != NL80211_IFTYPE_AP)
vif->offload_flags &= ~(IEEE80211_OFFLOAD_ENCAP_ENABLED |
IEEE80211_OFFLOAD_DECAP_ENABLED);
if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
arvif->tx_encap_type = ATH12K_HW_TXRX_ETHERNET;
else if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
arvif->tx_encap_type = ATH12K_HW_TXRX_RAW;
else
arvif->tx_encap_type = ATH12K_HW_TXRX_NATIVE_WIFI;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, arvif->tx_encap_type);
if (ret) {
ath12k_warn(ab, "failed to set vdev %d tx encap mode: %d\n",
arvif->vdev_id, ret);
vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
}
param_id = WMI_VDEV_PARAM_RX_DECAP_TYPE;
if (vif->offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED)
param_value = ATH12K_HW_TXRX_ETHERNET;
else if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
param_value = ATH12K_HW_TXRX_RAW;
else
param_value = ATH12K_HW_TXRX_NATIVE_WIFI;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, param_value);
if (ret) {
ath12k_warn(ab, "failed to set vdev %d rx decap mode: %d\n",
arvif->vdev_id, ret);
vif->offload_flags &= ~IEEE80211_OFFLOAD_DECAP_ENABLED;
}
}
static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath12k *ar = hw->priv;
struct ath12k_base *ab = ar->ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
struct ath12k_wmi_peer_create_arg peer_param;
u32 param_id, param_value;
u16 nss;
int i;
int ret;
int bit;
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
mutex_lock(&ar->conf_mutex);
if (vif->type == NL80211_IFTYPE_AP &&
ar->num_peers > (ar->max_num_peers - 1)) {
ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
ret = -ENOBUFS;
goto err;
}
if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n",
TARGET_NUM_VDEVS);
ret = -EBUSY;
goto err;
}
memset(arvif, 0, sizeof(*arvif));
arvif->ar = ar;
arvif->vif = vif;
INIT_LIST_HEAD(&arvif->list);
/* Should we initialize any worker to handle connection loss indication
* from firmware in sta mode?
*/
for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
arvif->bitrate_mask.control[i].legacy = 0xffffffff;
memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
sizeof(arvif->bitrate_mask.control[i].ht_mcs));
memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
sizeof(arvif->bitrate_mask.control[i].vht_mcs));
}
bit = __ffs64(ab->free_vdev_map);
arvif->vdev_id = bit;
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
switch (vif->type) {
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_STATION:
arvif->vdev_type = WMI_VDEV_TYPE_STA;
break;
case NL80211_IFTYPE_MESH_POINT:
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
fallthrough;
case NL80211_IFTYPE_AP:
arvif->vdev_type = WMI_VDEV_TYPE_AP;
break;
case NL80211_IFTYPE_MONITOR:
arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
ar->monitor_vdev_id = bit;
break;
default:
WARN_ON(1);
break;
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac add interface id %d type %d subtype %d map %llx\n",
arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
ab->free_vdev_map);
vif->cab_queue = arvif->vdev_id % (ATH12K_HW_MAX_QUEUES - 1);
for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
vif->hw_queue[i] = i % (ATH12K_HW_MAX_QUEUES - 1);
ath12k_mac_setup_vdev_create_arg(arvif, &vdev_arg);
ret = ath12k_wmi_vdev_create(ar, vif->addr, &vdev_arg);
if (ret) {
ath12k_warn(ab, "failed to create WMI vdev %d: %d\n",
arvif->vdev_id, ret);
goto err;
}
ar->num_created_vdevs++;
ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM created, vdev_id %d\n",
vif->addr, arvif->vdev_id);
ar->allocated_vdev_map |= 1LL << arvif->vdev_id;
ab->free_vdev_map &= ~(1LL << arvif->vdev_id);
spin_lock_bh(&ar->data_lock);
list_add(&arvif->list, &ar->arvifs);
spin_unlock_bh(&ar->data_lock);
ath12k_mac_op_update_vif_offload(hw, vif);
nss = hweight32(ar->cfg_tx_chainmask) ? : 1;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
WMI_VDEV_PARAM_NSS, nss);
if (ret) {
ath12k_warn(ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n",
arvif->vdev_id, ar->cfg_tx_chainmask, nss, ret);
goto err_vdev_del;
}
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_AP:
peer_param.vdev_id = arvif->vdev_id;
peer_param.peer_addr = vif->addr;
peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
ret = ath12k_peer_create(ar, arvif, NULL, &peer_param);
if (ret) {
ath12k_warn(ab, "failed to vdev %d create peer for AP: %d\n",
arvif->vdev_id, ret);
goto err_vdev_del;
}
ret = ath12k_mac_set_kickout(arvif);
if (ret) {
ath12k_warn(ar->ab, "failed to set vdev %i kickout parameters: %d\n",
arvif->vdev_id, ret);
goto err_peer_del;
}
break;
case WMI_VDEV_TYPE_STA:
param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
param_value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param_id, param_value);
if (ret) {
ath12k_warn(ar->ab, "failed to set vdev %d RX wake policy: %d\n",
arvif->vdev_id, ret);
goto err_peer_del;
}
param_id = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
param_value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param_id, param_value);
if (ret) {
ath12k_warn(ar->ab, "failed to set vdev %d TX wake threshold: %d\n",
arvif->vdev_id, ret);
goto err_peer_del;
}
param_id = WMI_STA_PS_PARAM_PSPOLL_COUNT;
param_value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param_id, param_value);
if (ret) {
ath12k_warn(ar->ab, "failed to set vdev %d pspoll count: %d\n",
arvif->vdev_id, ret);
goto err_peer_del;
}
ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, false);
if (ret) {
ath12k_warn(ar->ab, "failed to disable vdev %d ps mode: %d\n",
arvif->vdev_id, ret);
goto err_peer_del;
}
break;
default:
break;
}
arvif->txpower = vif->bss_conf.txpower;
ret = ath12k_mac_txpower_recalc(ar);
if (ret)
goto err_peer_del;
param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
param_value = ar->hw->wiphy->rts_threshold;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, param_value);
if (ret) {
ath12k_warn(ar->ab, "failed to set rts threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
}
ath12k_dp_vdev_tx_attach(ar, arvif);
if (vif->type != NL80211_IFTYPE_MONITOR && ar->monitor_conf_enabled)
ath12k_mac_monitor_vdev_create(ar);
mutex_unlock(&ar->conf_mutex);
return ret;
err_peer_del:
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
reinit_completion(&ar->peer_delete_done);
ret = ath12k_wmi_send_peer_delete_cmd(ar, vif->addr,
arvif->vdev_id);
if (ret) {
ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
arvif->vdev_id, vif->addr);
goto err;
}
ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id,
vif->addr);
if (ret)
goto err;
ar->num_peers--;
}
err_vdev_del:
ath12k_wmi_vdev_delete(ar, arvif->vdev_id);
ar->num_created_vdevs--;
ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
ab->free_vdev_map |= 1LL << arvif->vdev_id;
ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id);
spin_lock_bh(&ar->data_lock);
list_del(&arvif->list);
spin_unlock_bh(&ar->data_lock);
err:
mutex_unlock(&ar->conf_mutex);
return ret;
}
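/* Drop dangling vif references held by in-flight tx descriptors so
* that completions arriving after the interface is removed do not
* dereference a stale vif.
*/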
static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif)
{
struct ath12k_tx_desc_info *tx_desc_info;
struct ath12k_skb_cb *skb_cb;
struct sk_buff *skb;
int i;
for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
spin_lock_bh(&dp->tx_desc_lock[i]);
list_for_each_entry(tx_desc_info, &dp->tx_desc_used_list[i],
list) {
skb = tx_desc_info->skb;
if (!skb)
continue;
skb_cb = ATH12K_SKB_CB(skb);
if (skb_cb->vif == vif)
skb_cb->vif = NULL;
}
spin_unlock_bh(&dp->tx_desc_lock[i]);
}
}
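/* Tear down the vdev backing an interface: delete the AP self peer
* if any, issue the WMI vdev delete and wait for the delete
* response, then release the vdev id and clean up peers, pending
* mgmt tx and descriptor references.
*/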
static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath12k *ar = hw->priv;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_base *ab = ar->ab;
unsigned long time_left;
int ret;
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC, "mac remove interface (vdev %d)\n",
arvif->vdev_id);
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath12k_peer_delete(ar, arvif->vdev_id, vif->addr);
if (ret)
ath12k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n",
arvif->vdev_id, ret);
}
reinit_completion(&ar->vdev_delete_done);
ret = ath12k_wmi_vdev_delete(ar, arvif->vdev_id);
if (ret) {
ath12k_warn(ab, "failed to delete WMI vdev %d: %d\n",
arvif->vdev_id, ret);
goto err_vdev_del;
}
time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
ATH12K_VDEV_DELETE_TIMEOUT_HZ);
if (time_left == 0) {
ath12k_warn(ab, "Timeout in receiving vdev delete response\n");
goto err_vdev_del;
}
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ar->monitor_vdev_id = -1;
ar->monitor_vdev_created = false;
} else if (ar->monitor_vdev_created && !ar->monitor_started) {
ret = ath12k_mac_monitor_vdev_delete(ar);
}
ab->free_vdev_map |= 1LL << (arvif->vdev_id);
ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id);
ar->num_created_vdevs--;
ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
vif->addr, arvif->vdev_id);
err_vdev_del:
spin_lock_bh(&ar->data_lock);
list_del(&arvif->list);
spin_unlock_bh(&ar->data_lock);
ath12k_peer_cleanup(ar, arvif->vdev_id);
idr_for_each(&ar->txmgmt_idr,
ath12k_mac_vif_txmgmt_idr_remove, vif);
ath12k_mac_vif_unref(&ab->dp, vif);
ath12k_dp_tx_put_bank_profile(&ab->dp, arvif->bank_id);
/* Recalc txpower for remaining vdev */
ath12k_mac_txpower_recalc(ar);
clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
/* TODO: recal traffic pause state based on the available vdevs */
mutex_unlock(&ar->conf_mutex);
}
/* FIXME: Has to be verified. */
#define SUPPORTED_FILTERS \
(FIF_ALLMULTI | \
FIF_CONTROL | \
FIF_PSPOLL | \
FIF_OTHER_BSS | \
FIF_BCN_PRBRESP_PROMISC | \
FIF_PROBE_REQ | \
FIF_FCSFAIL)
static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags,
u64 multicast)
{
struct ath12k *ar = hw->priv;
bool reset_flag;
int ret;
mutex_lock(&ar->conf_mutex);
*total_flags &= SUPPORTED_FILTERS;
ar->filter_flags = *total_flags;
/* For monitor mode */
reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag);
if (!ret) {
if (!reset_flag)
set_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
else
clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
} else {
ath12k_warn(ar->ab,
"failed to set monitor filter: %d\n", ret);
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"total_flags:0x%x, reset_flag:%d\n",
*total_flags, reset_flag);
mutex_unlock(&ar->conf_mutex);
}
static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
struct ath12k *ar = hw->priv;
mutex_lock(&ar->conf_mutex);
*tx_ant = ar->cfg_tx_chainmask;
*rx_ant = ar->cfg_rx_chainmask;
mutex_unlock(&ar->conf_mutex);
return 0;
}
static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
{
struct ath12k *ar = hw->priv;
int ret;
mutex_lock(&ar->conf_mutex);
ret = __ath12k_set_antenna(ar, tx_ant, rx_ant);
mutex_unlock(&ar->conf_mutex);
return ret;
}
static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
{
struct ath12k *ar = hw->priv;
int ret = -EINVAL;
mutex_lock(&ar->conf_mutex);
switch (params->action) {
case IEEE80211_AMPDU_RX_START:
ret = ath12k_dp_rx_ampdu_start(ar, params);
break;
case IEEE80211_AMPDU_RX_STOP:
ret = ath12k_dp_rx_ampdu_stop(ar, params);
break;
case IEEE80211_AMPDU_TX_START:
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
case IEEE80211_AMPDU_TX_OPERATIONAL:
/* Tx A-MPDU aggregation is offloaded to hw/fw, so deny mac80211
* Tx aggregation requests.
*/
ret = -EOPNOTSUPP;
break;
}
mutex_unlock(&ar->conf_mutex);
return ret;
}
static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
struct ath12k *ar = hw->priv;
struct ath12k_base *ab = ar->ab;
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac chanctx add freq %u width %d ptr %pK\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
/* TODO: In case of multiple channel context, populate rx_channel from
* Rx PPDU desc information.
*/
ar->rx_channel = ctx->def.chan;
spin_unlock_bh(&ar->data_lock);
mutex_unlock(&ar->conf_mutex);
return 0;
}
static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
struct ath12k *ar = hw->priv;
struct ath12k_base *ab = ar->ab;
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac chanctx remove freq %u width %d ptr %pK\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
/* TODO: If one more channel context is left, populate rx_channel
* with the channel of that remaining channel context.
*/
ar->rx_channel = NULL;
spin_unlock_bh(&ar->data_lock);
mutex_unlock(&ar->conf_mutex);
}
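/* Build the WMI vdev start/restart request from the channel
* definition. The power values are scaled by 2 since WMI expects
* them in half-dBm units; AP vdevs additionally carry SSID and
* DFS/radar settings.
*/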
static int
ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
const struct cfg80211_chan_def *chandef,
bool restart)
{
struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
struct wmi_vdev_start_req_arg arg = {};
int he_support = arvif->vif->bss_conf.he_support;
int ret;
lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->vdev_setup_done);
arg.vdev_id = arvif->vdev_id;
arg.dtim_period = arvif->dtim_period;
arg.bcn_intval = arvif->beacon_interval;
arg.punct_bitmap = ~arvif->punct_bitmap;
arg.freq = chandef->chan->center_freq;
arg.band_center_freq1 = chandef->center_freq1;
arg.band_center_freq2 = chandef->center_freq2;
arg.mode = ath12k_phymodes[chandef->chan->band][chandef->width];
arg.min_power = 0;
arg.max_power = chandef->chan->max_power * 2;
arg.max_reg_power = chandef->chan->max_reg_power * 2;
arg.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
arg.pref_tx_streams = ar->num_tx_chains;
arg.pref_rx_streams = ar->num_rx_chains;
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
arg.ssid = arvif->u.ap.ssid;
arg.ssid_len = arvif->u.ap.ssid_len;
arg.hidden_ssid = arvif->u.ap.hidden_ssid;
/* For now allow DFS for AP mode */
arg.chan_radar = !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
arg.passive = arg.chan_radar;
spin_lock_bh(&ab->base_lock);
arg.regdomain = ar->ab->dfs_region;
spin_unlock_bh(&ab->base_lock);
/* TODO: Notify if secondary 80 MHz also needs radar detection */
if (he_support) {
ret = ath12k_set_he_mu_sounding_mode(ar, arvif);
if (ret) {
ath12k_warn(ar->ab, "failed to set he mode vdev %i\n",
arg.vdev_id);
return ret;
}
}
}
arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac vdev %d start center_freq %d phymode %s punct_bitmap 0x%x\n",
arg.vdev_id, arg.freq,
ath12k_mac_phymode_str(arg.mode), arg.punct_bitmap);
ret = ath12k_wmi_vdev_start(ar, &arg, restart);
if (ret) {
ath12k_warn(ar->ab, "failed to %s WMI vdev %i\n",
restart ? "restart" : "start", arg.vdev_id);
return ret;
}
ret = ath12k_mac_vdev_setup_sync(ar);
if (ret) {
ath12k_warn(ab, "failed to synchronize setup for vdev %i %s: %d\n",
arg.vdev_id, restart ? "restart" : "start", ret);
return ret;
}
ar->num_started_vdevs++;
ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM started, vdev_id %d\n",
arvif->vif->addr, arvif->vdev_id);
/* Enable the CAC flag in the driver when the channel's DFS CAC time
* (dfs_cac_ms) is non-zero -- it is valid only for radar channels --
* and its state is NL80211_DFS_USABLE, which indicates CAC must
* complete before the channel can be used. The flag is used to drop
* rx packets during CAC.
*/
/* TODO: Set the flag for other interface types as required */
if (arvif->vdev_type == WMI_VDEV_TYPE_AP &&
chandef->chan->dfs_cac_ms &&
chandef->chan->dfs_state == NL80211_DFS_USABLE) {
set_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
ath12k_dbg(ab, ATH12K_DBG_MAC,
"CAC Started in chan_freq %d for vdev %d\n",
arg.freq, arg.vdev_id);
}
ret = ath12k_mac_set_txbf_conf(arvif);
if (ret)
ath12k_warn(ab, "failed to set txbf conf for vdev %d: %d\n",
arvif->vdev_id, ret);
return 0;
}
static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif)
{
struct ath12k *ar = arvif->ar;
int ret;
lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->vdev_setup_done);
ret = ath12k_wmi_vdev_stop(ar, arvif->vdev_id);
if (ret) {
ath12k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
arvif->vdev_id, ret);
goto err;
}
ret = ath12k_mac_vdev_setup_sync(ar);
if (ret) {
ath12k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
arvif->vdev_id, ret);
goto err;
}
WARN_ON(ar->num_started_vdevs == 0);
ar->num_started_vdevs--;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
arvif->vif->addr, arvif->vdev_id);
if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n",
arvif->vdev_id);
}
return 0;
err:
return ret;
}
static int ath12k_mac_vdev_start(struct ath12k_vif *arvif,
const struct cfg80211_chan_def *chandef)
{
return ath12k_mac_vdev_start_restart(arvif, chandef, false);
}
static int ath12k_mac_vdev_restart(struct ath12k_vif *arvif,
const struct cfg80211_chan_def *chandef)
{
return ath12k_mac_vdev_start_restart(arvif, chandef, true);
}
struct ath12k_mac_change_chanctx_arg {
struct ieee80211_chanctx_conf *ctx;
struct ieee80211_vif_chanctx_switch *vifs;
int n_vifs;
int next_vif;
};
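/* Changing a channel context happens in two passes over the active
* interfaces: the first iterator counts the vifs bound to the
* context so the switch array can be sized, the second fills it.
* Both old_ctx and new_ctx reference the same, already updated,
* context here.
*/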
static void
ath12k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct ath12k_mac_change_chanctx_arg *arg = data;
if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx)
return;
arg->n_vifs++;
}
static void
ath12k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct ath12k_mac_change_chanctx_arg *arg = data;
struct ieee80211_chanctx_conf *ctx;
ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf);
if (ctx != arg->ctx)
return;
if (WARN_ON(arg->next_vif == arg->n_vifs))
return;
arg->vifs[arg->next_vif].vif = vif;
arg->vifs[arg->next_vif].old_ctx = ctx;
arg->vifs[arg->next_vif].new_ctx = ctx;
arg->next_vif++;
}
static void
ath12k_mac_update_vif_chan(struct ath12k *ar,
struct ieee80211_vif_chanctx_switch *vifs,
int n_vifs)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_vif *arvif;
int ret;
int i;
bool monitor_vif = false;
lockdep_assert_held(&ar->conf_mutex);
for (i = 0; i < n_vifs; i++) {
arvif = (void *)vifs[i].vif->drv_priv;
if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR)
monitor_vif = true;
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n",
arvif->vdev_id,
vifs[i].old_ctx->def.chan->center_freq,
vifs[i].new_ctx->def.chan->center_freq,
vifs[i].old_ctx->def.width,
vifs[i].new_ctx->def.width);
if (WARN_ON(!arvif->is_started))
continue;
if (WARN_ON(!arvif->is_up))
continue;
ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id);
if (ret) {
ath12k_warn(ab, "failed to down vdev %d: %d\n",
arvif->vdev_id, ret);
continue;
}
}
/* All relevant vdevs are downed and associated channel resources
* should be available for the channel switch now.
*/
/* TODO: Update ar->rx_channel */
for (i = 0; i < n_vifs; i++) {
arvif = (void *)vifs[i].vif->drv_priv;
if (WARN_ON(!arvif->is_started))
continue;
if (WARN_ON(!arvif->is_up))
continue;
ret = ath12k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def);
if (ret) {
ath12k_warn(ab, "failed to restart vdev %d: %d\n",
arvif->vdev_id, ret);
continue;
}
ret = ath12k_mac_setup_bcn_tmpl(arvif);
if (ret)
ath12k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
ret);
ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid);
if (ret) {
ath12k_warn(ab, "failed to bring vdev up %d: %d\n",
arvif->vdev_id, ret);
continue;
}
}
/* Restart the internal monitor vdev on new channel */
if (!monitor_vif && ar->monitor_vdev_created) {
if (!ath12k_mac_monitor_stop(ar))
ath12k_mac_monitor_start(ar);
}
}
static void
ath12k_mac_update_active_vif_chan(struct ath12k *ar,
struct ieee80211_chanctx_conf *ctx)
{
struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx };
lockdep_assert_held(&ar->conf_mutex);
ieee80211_iterate_active_interfaces_atomic(ar->hw,
IEEE80211_IFACE_ITER_NORMAL,
ath12k_mac_change_chanctx_cnt_iter,
&arg);
if (arg.n_vifs == 0)
return;
arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), GFP_KERNEL);
if (!arg.vifs)
return;
ieee80211_iterate_active_interfaces_atomic(ar->hw,
IEEE80211_IFACE_ITER_NORMAL,
ath12k_mac_change_chanctx_fill_iter,
&arg);
ath12k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
kfree(arg.vifs);
}
static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx,
u32 changed)
{
struct ath12k *ar = hw->priv;
struct ath12k_base *ab = ar->ab;
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac chanctx change freq %u width %d ptr %pK changed %x\n",
ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
/* This shouldn't really happen because channel switching should use
* switch_vif_chanctx().
*/
if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
goto unlock;
if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH)
ath12k_mac_update_active_vif_chan(ar, ctx);
/* TODO: Recalc radar detection */
unlock:
mutex_unlock(&ar->conf_mutex);
}
static int ath12k_start_vdev_delay(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath12k *ar = hw->priv;
struct ath12k_base *ab = ar->ab;
struct ath12k_vif *arvif = (void *)vif->drv_priv;
int ret;
if (WARN_ON(arvif->is_started))
return -EBUSY;
ret = ath12k_mac_vdev_start(arvif, &arvif->chanctx.def);
if (ret) {
ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
arvif->vdev_id, vif->addr,
arvif->chanctx.def.chan->center_freq, ret);
return ret;
}
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath12k_monitor_vdev_up(ar, arvif->vdev_id);
if (ret) {
ath12k_warn(ab, "failed put monitor up: %d\n", ret);
return ret;
}
}
arvif->is_started = true;
/* TODO: Setup ps and cts/rts protection */
return 0;
}
static int
ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct ath12k *ar = hw->priv;
struct ath12k_base *ab = ar->ab;
struct ath12k_vif *arvif = (void *)vif->drv_priv;
int ret;
struct ath12k_wmi_peer_create_arg param;
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac chanctx assign ptr %pK vdev_id %i\n",
ctx, arvif->vdev_id);
arvif->punct_bitmap = link_conf->eht_puncturing;
/* for some targets bss peer must be created before vdev_start */
if (ab->hw_params->vdev_start_delay &&
arvif->vdev_type != WMI_VDEV_TYPE_AP &&
arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
!ath12k_peer_exist_by_vdev_id(ab, arvif->vdev_id)) {
memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
ret = 0;
goto out;
}
if (WARN_ON(arvif->is_started)) {
ret = -EBUSY;
goto out;
}
if (ab->hw_params->vdev_start_delay &&
(arvif->vdev_type == WMI_VDEV_TYPE_AP ||
arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) {
param.vdev_id = arvif->vdev_id;
param.peer_type = WMI_PEER_TYPE_DEFAULT;
param.peer_addr = ar->mac_addr;
ret = ath12k_peer_create(ar, arvif, NULL, &param);
if (ret) {
ath12k_warn(ab, "failed to create peer after vdev start delay: %d",
ret);
goto out;
}
}
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath12k_mac_monitor_start(ar);
if (ret)
goto out;
arvif->is_started = true;
goto out;
}
ret = ath12k_mac_vdev_start(arvif, &ctx->def);
if (ret) {
ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
arvif->vdev_id, vif->addr,
ctx->def.chan->center_freq, ret);
goto out;
}
if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && ar->monitor_vdev_created)
ath12k_mac_monitor_start(ar);
arvif->is_started = true;
/* TODO: Setup ps and cts/rts protection */
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static void
ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct ath12k *ar = hw->priv;
struct ath12k_base *ab = ar->ab;
struct ath12k_vif *arvif = (void *)vif->drv_priv;
int ret;
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac chanctx unassign ptr %pK vdev_id %i\n",
ctx, arvif->vdev_id);
WARN_ON(!arvif->is_started);
if (ab->hw_params->vdev_start_delay &&
arvif->vdev_type == WMI_VDEV_TYPE_MONITOR &&
ath12k_peer_find_by_addr(ab, ar->mac_addr))
ath12k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath12k_mac_monitor_stop(ar);
if (ret) {
mutex_unlock(&ar->conf_mutex);
return;
}
arvif->is_started = false;
}
ret = ath12k_mac_vdev_stop(arvif);
if (ret)
ath12k_warn(ab, "failed to stop vdev %i: %d\n",
arvif->vdev_id, ret);
arvif->is_started = false;
if (ab->hw_params->vdev_start_delay &&
arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
ath12k_wmi_vdev_down(ar, arvif->vdev_id);
if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
ar->num_started_vdevs == 1 && ar->monitor_vdev_created)
ath12k_mac_monitor_stop(ar);
mutex_unlock(&ar->conf_mutex);
}
static int
ath12k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif_chanctx_switch *vifs,
int n_vifs,
enum ieee80211_chanctx_switch_mode mode)
{
struct ath12k *ar = hw->priv;
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"mac chanctx switch n_vifs %d mode %d\n",
n_vifs, mode);
ath12k_mac_update_vif_chan(ar, vifs, n_vifs);
mutex_unlock(&ar->conf_mutex);
return 0;
}
static int
ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value)
{
struct ath12k_vif *arvif;
int ret = 0;
mutex_lock(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "setting mac vdev %d param %d value %d\n",
param, arvif->vdev_id, value);
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param, value);
if (ret) {
ath12k_warn(ar->ab, "failed to set param %d for vdev %d: %d\n",
param, arvif->vdev_id, ret);
break;
}
}
mutex_unlock(&ar->conf_mutex);
return ret;
}
/* mac80211 stores a device-wide RTS/fragmentation threshold value;
* the ath12k driver applies it per interface in firmware.
*/
static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct ath12k *ar = hw->priv;
int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
return ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
}
static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
{
/* Even though there's a WMI vdev param for the fragmentation
* threshold, no known firmware actually implements it. Moreover it is
* not possible to leave frame fragmentation to mac80211 because
* firmware clears the "more fragments" bit in the frame control,
* making it impossible for remote devices to reassemble frames.
*
* Hence implement a dummy callback just to say fragmentation isn't
* supported. This effectively prevents mac80211 from doing frame
* fragmentation in software.
*/
return -EOPNOTSUPP;
}
static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct ath12k *ar = hw->priv;
long time_left;
if (drop)
return;
time_left = wait_event_timeout(ar->dp.tx_empty_waitq,
(atomic_read(&ar->dp.num_tx_pending) == 0),
ATH12K_FLUSH_TIMEOUT);
if (time_left == 0)
ath12k_warn(ar->ab, "failed to flush transmit queue %ld\n", time_left);
time_left = wait_event_timeout(ar->txmgmt_empty_waitq,
(atomic_read(&ar->num_pending_mgmt_tx) == 0),
ATH12K_FLUSH_TIMEOUT);
if (time_left == 0)
ath12k_warn(ar->ab, "failed to flush mgmt transmit queue %ld\n",
time_left);
}
static int
ath12k_mac_bitrate_mask_num_ht_rates(struct ath12k *ar,
enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask)
{
int num_rates = 0;
int i;
for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
num_rates += hweight16(mask->control[band].ht_mcs[i]);
return num_rates;
}
static bool
ath12k_mac_has_single_legacy_rate(struct ath12k *ar,
enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask)
{
int num_rates = 0;
num_rates = hweight32(mask->control[band].legacy);
if (ath12k_mac_bitrate_mask_num_ht_rates(ar, band, mask))
return false;
if (ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask))
return false;
return num_rates == 1;
}
static bool
ath12k_mac_bitrate_mask_get_single_nss(struct ath12k *ar,
enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask,
int *nss)
{
struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
u8 ht_nss_mask = 0;
u8 vht_nss_mask = 0;
int i;
/* No need to consider legacy here. Basic rates are always present
* in bitrate mask
*/
for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
if (mask->control[band].ht_mcs[i] == 0)
continue;
else if (mask->control[band].ht_mcs[i] ==
sband->ht_cap.mcs.rx_mask[i])
ht_nss_mask |= BIT(i);
else
return false;
}
for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
if (mask->control[band].vht_mcs[i] == 0)
continue;
else if (mask->control[band].vht_mcs[i] ==
ath12k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
vht_nss_mask |= BIT(i);
else
return false;
}
if (ht_nss_mask != vht_nss_mask)
return false;
if (ht_nss_mask == 0)
return false;
if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
return false;
*nss = fls(ht_nss_mask);
return true;
}
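/* Translate a single-bit legacy rate mask into a WMI rate code. On
* 5/6 GHz the mask does not contain the four CCK rates, so the
* index is offset to the first OFDM rate before the table lookup.
*/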
static int
ath12k_mac_get_single_legacy_rate(struct ath12k *ar,
enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask,
u32 *rate, u8 *nss)
{
int rate_idx;
u16 bitrate;
u8 preamble;
u8 hw_rate;
if (hweight32(mask->control[band].legacy) != 1)
return -EINVAL;
rate_idx = ffs(mask->control[band].legacy) - 1;
if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ)
rate_idx += ATH12K_MAC_FIRST_OFDM_RATE_IDX;
hw_rate = ath12k_legacy_rates[rate_idx].hw_value;
bitrate = ath12k_legacy_rates[rate_idx].bitrate;
if (ath12k_mac_bitrate_is_cck(bitrate))
preamble = WMI_RATE_PREAMBLE_CCK;
else
preamble = WMI_RATE_PREAMBLE_OFDM;
*nss = 1;
*rate = ATH12K_HW_RATE_CODE(hw_rate, 0, preamble);
return 0;
}
static int ath12k_mac_set_fixed_rate_params(struct ath12k_vif *arvif,
u32 rate, u8 nss, u8 sgi, u8 ldpc)
{
struct ath12k *ar = arvif->ar;
u32 vdev_param;
int ret;
lockdep_assert_held(&ar->conf_mutex);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n",
arvif->vdev_id, rate, nss, sgi);
vdev_param = WMI_VDEV_PARAM_FIXED_RATE;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
vdev_param, rate);
if (ret) {
ath12k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n",
rate, ret);
return ret;
}
vdev_param = WMI_VDEV_PARAM_NSS;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
vdev_param, nss);
if (ret) {
ath12k_warn(ar->ab, "failed to set nss param %d: %d\n",
nss, ret);
return ret;
}
vdev_param = WMI_VDEV_PARAM_SGI;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
vdev_param, sgi);
if (ret) {
ath12k_warn(ar->ab, "failed to set sgi param %d: %d\n",
sgi, ret);
return ret;
}
vdev_param = WMI_VDEV_PARAM_LDPC;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
vdev_param, ldpc);
if (ret) {
ath12k_warn(ar->ab, "failed to set ldpc param %d: %d\n",
ldpc, ret);
return ret;
}
return 0;
}
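/* Firmware only accepts VHT rate masks that form a contiguous range
* starting at MCS 0, i.e. per-NSS masks of 0, 0-7, 0-8 or 0-9;
* anything else cannot be expressed and is rejected.
*/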
static bool
ath12k_mac_vht_mcs_range_present(struct ath12k *ar,
enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask)
{
int i;
u16 vht_mcs;
for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
vht_mcs = mask->control[band].vht_mcs[i];
switch (vht_mcs) {
case 0:
case BIT(8) - 1:
case BIT(9) - 1:
case BIT(10) - 1:
break;
default:
return false;
}
}
return true;
}
static void ath12k_mac_set_bitrate_mask_iter(void *data,
struct ieee80211_sta *sta)
{
struct ath12k_vif *arvif = data;
struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
struct ath12k *ar = arvif->ar;
spin_lock_bh(&ar->data_lock);
arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
spin_unlock_bh(&ar->data_lock);
ieee80211_queue_work(ar->hw, &arsta->update_wk);
}
static void ath12k_mac_disable_peer_fixed_rate(void *data,
struct ieee80211_sta *sta)
{
struct ath12k_vif *arvif = data;
struct ath12k *ar = arvif->ar;
int ret;
ret = ath12k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id,
WMI_PEER_PARAM_FIXED_RATE,
WMI_FIXED_RATE_NONE);
if (ret)
ath12k_warn(ar->ab,
"failed to disable peer fixed rate for STA %pM ret %d\n",
sta->addr, ret);
}
static int
ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask)
{
struct ath12k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
struct ath12k *ar = arvif->ar;
enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
u32 rate;
u8 nss;
u8 sgi;
u8 ldpc;
int single_nss;
int ret;
int num_rates;
if (ath12k_mac_vif_chan(vif, &def))
return -EPERM;
band = def.chan->band;
ht_mcs_mask = mask->control[band].ht_mcs;
vht_mcs_mask = mask->control[band].vht_mcs;
ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
sgi = mask->control[band].gi;
if (sgi == NL80211_TXRATE_FORCE_LGI)
return -EINVAL;
/* mac80211 doesn't support sending a fixed HT/VHT MCS alone; rather it
* requires passing at least one of the used basic rates along with
* them. Fixed rate setting across different preambles (legacy, HT,
* VHT) is not supported by the FW. Hence use of the FIXED_RATE vdev
* param is not suitable for setting single HT/VHT rates.
* But there could be a single basic rate passed from userspace, which
* can be done through the FIXED_RATE param.
*/
if (ath12k_mac_has_single_legacy_rate(ar, band, mask)) {
ret = ath12k_mac_get_single_legacy_rate(ar, band, mask, &rate,
&nss);
if (ret) {
ath12k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
ieee80211_iterate_stations_atomic(ar->hw,
ath12k_mac_disable_peer_fixed_rate,
arvif);
} else if (ath12k_mac_bitrate_mask_get_single_nss(ar, band, mask,
&single_nss)) {
rate = WMI_FIXED_RATE_NONE;
nss = single_nss;
} else {
rate = WMI_FIXED_RATE_NONE;
nss = min_t(u32, ar->num_tx_chains,
max(ath12k_mac_max_ht_nss(ht_mcs_mask),
ath12k_mac_max_vht_nss(vht_mcs_mask)));
/* If multiple rates across different preambles are given,
* we can reconfigure this info with all peers using the PEER_ASSOC
* command, with the below exception cases.
* - Single VHT Rate : peer_assoc command accommodates only MCS
* range values i.e 0-7, 0-8, 0-9 for VHT. Though mac80211
* mandates passing basic rates along with HT/VHT rates, FW
* doesn't allow switching from VHT to Legacy. Hence instead of
* setting legacy and VHT rates using the RATEMASK_CMD vdev cmd,
* we could set this VHT rate as the peer fixed rate param, which
* will override the FIXED rate and FW rate control algorithm.
* If a single VHT rate is passed along with HT rates, we select
* the VHT rate as the fixed rate for VHT peers.
* - Multiple VHT Rates : When multiple VHT rates are given, this
* can be set using RATEMASK CMD which uses the FW rate-ctl alg.
* TODO: Setting multiple VHT MCS and replacing peer_assoc with
* RATEMASK_CMDID can cover all use cases of setting rates
* across multiple preambles and rates within the same type.
* But this requires more validation of the command at this point.
*/
num_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band,
mask);
if (!ath12k_mac_vht_mcs_range_present(ar, band, mask) &&
num_rates > 1) {
/* TODO: Handle multiple VHT MCS values setting using
* RATEMASK CMD
*/
ath12k_warn(ar->ab,
"setting more than one MCS value in bitrate mask is not supported\n");
return -EINVAL;
}
ieee80211_iterate_stations_atomic(ar->hw,
ath12k_mac_disable_peer_fixed_rate,
arvif);
mutex_lock(&ar->conf_mutex);
arvif->bitrate_mask = *mask;
ieee80211_iterate_stations_atomic(ar->hw,
ath12k_mac_set_bitrate_mask_iter,
arvif);
mutex_unlock(&ar->conf_mutex);
}
mutex_lock(&ar->conf_mutex);
ret = ath12k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
if (ret) {
ath12k_warn(ar->ab, "failed to set fixed rate params on vdev %i: %d\n",
arvif->vdev_id, ret);
}
mutex_unlock(&ar->conf_mutex);
return ret;
}
static void
ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type)
{
struct ath12k *ar = hw->priv;
struct ath12k_base *ab = ar->ab;
struct ath12k_vif *arvif;
int recovery_count;
if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
return;
mutex_lock(&ar->conf_mutex);
if (ar->state == ATH12K_STATE_RESTARTED) {
ath12k_warn(ar->ab, "pdev %d successfully recovered\n",
ar->pdev->pdev_id);
ar->state = ATH12K_STATE_ON;
ieee80211_wake_queues(ar->hw);
if (ab->is_reset) {
recovery_count = atomic_inc_return(&ab->recovery_count);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "recovery count %d\n",
recovery_count);
/* When there are multiple radios in an SOC,
* the recovery has to be done for each radio
*/
if (recovery_count == ab->num_radios) {
atomic_dec(&ab->reset_count);
complete(&ab->reset_complete);
ab->is_reset = false;
atomic_set(&ab->fail_cont_count, 0);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
}
}
list_for_each_entry(arvif, &ar->arvifs, list) {
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"reconfig cipher %d up %d vdev type %d\n",
arvif->key_cipher,
arvif->is_up,
arvif->vdev_type);
/* After we trigger a disconnect, the upper layer will
* trigger a reconnect; the PN counters of the upper
* layer are then reset to stay in sync with the AP
* side, hence a PN mismatch will not happen.
*/
if (arvif->is_up &&
arvif->vdev_type == WMI_VDEV_TYPE_STA &&
arvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE) {
ieee80211_hw_restart_disconnect(arvif->vif);
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"restart disconnect\n");
}
}
}
mutex_unlock(&ar->conf_mutex);
}
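/* Request an updated BSS channel survey from firmware for the
* current rx channel, unless a scan is in progress or firmware does
* not offer the service.
*/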
static void
ath12k_mac_update_bss_chan_survey(struct ath12k *ar,
struct ieee80211_channel *channel)
{
int ret;
enum wmi_bss_chan_info_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
lockdep_assert_held(&ar->conf_mutex);
if (!test_bit(WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64, ar->ab->wmi_ab.svc_map) ||
ar->rx_channel != channel)
return;
if (ar->scan.state != ATH12K_SCAN_IDLE) {
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"ignoring bss chan info req while scanning..\n");
return;
}
reinit_completion(&ar->bss_survey_done);
ret = ath12k_wmi_pdev_bss_chan_info_request(ar, type);
if (ret) {
ath12k_warn(ar->ab, "failed to send pdev bss chan info request\n");
return;
}
ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
if (ret == 0)
ath12k_warn(ar->ab, "bss channel survey timed out\n");
}
static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
struct ath12k *ar = hw->priv;
struct ieee80211_supported_band *sband;
struct survey_info *ar_survey;
int ret = 0;
if (idx >= ATH12K_NUM_CHANS)
return -ENOENT;
ar_survey = &ar->survey[idx];
mutex_lock(&ar->conf_mutex);
sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
if (sband && idx >= sband->n_channels) {
idx -= sband->n_channels;
sband = NULL;
}
if (!sband)
sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
if (!sband || idx >= sband->n_channels) {
ret = -ENOENT;
goto exit;
}
ath12k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
spin_lock_bh(&ar->data_lock);
memcpy(survey, ar_survey, sizeof(*survey));
spin_unlock_bh(&ar->data_lock);
survey->channel = &sband->channels[idx];
if (ar->rx_channel == survey->channel)
survey->filled |= SURVEY_INFO_IN_USE;
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct station_info *sinfo)
{
struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
sinfo->rx_duration = arsta->rx_duration;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
sinfo->tx_duration = arsta->tx_duration;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
if (!arsta->txrate.legacy && !arsta->txrate.nss)
return;
if (arsta->txrate.legacy) {
sinfo->txrate.legacy = arsta->txrate.legacy;
} else {
sinfo->txrate.mcs = arsta->txrate.mcs;
sinfo->txrate.nss = arsta->txrate.nss;
sinfo->txrate.bw = arsta->txrate.bw;
sinfo->txrate.he_gi = arsta->txrate.he_gi;
sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc;
}
sinfo->txrate.flags = arsta->txrate.flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
/* TODO: Use real NF instead of default one. */
sinfo->signal = arsta->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
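/* mac80211 callback table binding the driver ops implemented above. */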
static const struct ieee80211_ops ath12k_ops = {
.tx = ath12k_mac_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = ath12k_mac_op_start,
.stop = ath12k_mac_op_stop,
.reconfig_complete = ath12k_mac_op_reconfig_complete,
.add_interface = ath12k_mac_op_add_interface,
.remove_interface = ath12k_mac_op_remove_interface,
.update_vif_offload = ath12k_mac_op_update_vif_offload,
.config = ath12k_mac_op_config,
.bss_info_changed = ath12k_mac_op_bss_info_changed,
.configure_filter = ath12k_mac_op_configure_filter,
.hw_scan = ath12k_mac_op_hw_scan,
.cancel_hw_scan = ath12k_mac_op_cancel_hw_scan,
.set_key = ath12k_mac_op_set_key,
.sta_state = ath12k_mac_op_sta_state,
.sta_set_txpwr = ath12k_mac_op_sta_set_txpwr,
.sta_rc_update = ath12k_mac_op_sta_rc_update,
.conf_tx = ath12k_mac_op_conf_tx,
.set_antenna = ath12k_mac_op_set_antenna,
.get_antenna = ath12k_mac_op_get_antenna,
.ampdu_action = ath12k_mac_op_ampdu_action,
.add_chanctx = ath12k_mac_op_add_chanctx,
.remove_chanctx = ath12k_mac_op_remove_chanctx,
.change_chanctx = ath12k_mac_op_change_chanctx,
.assign_vif_chanctx = ath12k_mac_op_assign_vif_chanctx,
.unassign_vif_chanctx = ath12k_mac_op_unassign_vif_chanctx,
.switch_vif_chanctx = ath12k_mac_op_switch_vif_chanctx,
.set_rts_threshold = ath12k_mac_op_set_rts_threshold,
.set_frag_threshold = ath12k_mac_op_set_frag_threshold,
.set_bitrate_mask = ath12k_mac_op_set_bitrate_mask,
.get_survey = ath12k_mac_op_get_survey,
.flush = ath12k_mac_op_flush,
.sta_statistics = ath12k_mac_op_sta_statistics,
};
static void ath12k_mac_update_ch_list(struct ath12k *ar,
struct ieee80211_supported_band *band,
u32 freq_low, u32 freq_high)
{
int i;
if (!(freq_low && freq_high))
return;
for (i = 0; i < band->n_channels; i++) {
if (band->channels[i].center_freq < freq_low ||
band->channels[i].center_freq > freq_high)
band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
}
}
static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band)
{
struct ath12k_pdev *pdev = ar->pdev;
struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
if (band == WMI_HOST_WLAN_2G_CAP)
return pdev_cap->band[NL80211_BAND_2GHZ].phy_id;
if (band == WMI_HOST_WLAN_5G_CAP)
return pdev_cap->band[NL80211_BAND_5GHZ].phy_id;
ath12k_warn(ar->ab, "unsupported phy cap:%d\n", band);
return 0;
}
static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
u32 supported_bands)
{
struct ieee80211_supported_band *band;
struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap;
void *channels;
u32 phy_id;
BUILD_BUG_ON((ARRAY_SIZE(ath12k_2ghz_channels) +
ARRAY_SIZE(ath12k_5ghz_channels) +
ARRAY_SIZE(ath12k_6ghz_channels)) !=
ATH12K_NUM_CHANS);
reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx];
if (supported_bands & WMI_HOST_WLAN_2G_CAP) {
channels = kmemdup(ath12k_2ghz_channels,
sizeof(ath12k_2ghz_channels),
GFP_KERNEL);
if (!channels)
return -ENOMEM;
band = &ar->mac.sbands[NL80211_BAND_2GHZ];
band->band = NL80211_BAND_2GHZ;
band->n_channels = ARRAY_SIZE(ath12k_2ghz_channels);
band->channels = channels;
band->n_bitrates = ath12k_g_rates_size;
band->bitrates = ath12k_g_rates;
ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
if (ar->ab->hw_params->single_pdev_only) {
phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP);
reg_cap = &ar->ab->hal_reg_cap[phy_id];
}
ath12k_mac_update_ch_list(ar, band,
reg_cap->low_2ghz_chan,
reg_cap->high_2ghz_chan);
}
if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
if (reg_cap->high_5ghz_chan >= ATH12K_MAX_6G_FREQ) {
channels = kmemdup(ath12k_6ghz_channels,
sizeof(ath12k_6ghz_channels), GFP_KERNEL);
if (!channels) {
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
return -ENOMEM;
}
ar->supports_6ghz = true;
band = &ar->mac.sbands[NL80211_BAND_6GHZ];
band->band = NL80211_BAND_6GHZ;
band->n_channels = ARRAY_SIZE(ath12k_6ghz_channels);
band->channels = channels;
band->n_bitrates = ath12k_a_rates_size;
band->bitrates = ath12k_a_rates;
ar->hw->wiphy->bands[NL80211_BAND_6GHZ] = band;
ath12k_mac_update_ch_list(ar, band,
reg_cap->low_5ghz_chan,
reg_cap->high_5ghz_chan);
}
if (reg_cap->low_5ghz_chan < ATH12K_MIN_6G_FREQ) {
channels = kmemdup(ath12k_5ghz_channels,
sizeof(ath12k_5ghz_channels),
GFP_KERNEL);
if (!channels) {
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
return -ENOMEM;
}
band = &ar->mac.sbands[NL80211_BAND_5GHZ];
band->band = NL80211_BAND_5GHZ;
band->n_channels = ARRAY_SIZE(ath12k_5ghz_channels);
band->channels = channels;
band->n_bitrates = ath12k_a_rates_size;
band->bitrates = ath12k_a_rates;
ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
if (ar->ab->hw_params->single_pdev_only) {
phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
reg_cap = &ar->ab->hal_reg_cap[phy_id];
}
ath12k_mac_update_ch_list(ar, band,
reg_cap->low_5ghz_chan,
reg_cap->high_5ghz_chan);
}
}
return 0;
}
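/* Advertise the supported interface combinations: one STA, plus up
* to 16 AP/mesh interfaces when supported, all sharing a single
* channel, with radar detection enabled for the common channel
* widths.
*/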
static int ath12k_mac_setup_iface_combinations(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
struct ieee80211_iface_combination *combinations;
struct ieee80211_iface_limit *limits;
int n_limits, max_interfaces;
bool ap, mesh;
ap = ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_AP);
mesh = IS_ENABLED(CONFIG_MAC80211_MESH) &&
ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT);
combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
if (!combinations)
return -ENOMEM;
if (ap || mesh) {
n_limits = 2;
max_interfaces = 16;
} else {
n_limits = 1;
max_interfaces = 1;
}
limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL);
if (!limits) {
kfree(combinations);
return -ENOMEM;
}
limits[0].max = 1;
limits[0].types |= BIT(NL80211_IFTYPE_STATION);
if (ap) {
limits[1].max = max_interfaces;
limits[1].types |= BIT(NL80211_IFTYPE_AP);
}
if (mesh)
limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
combinations[0].limits = limits;
combinations[0].n_limits = n_limits;
combinations[0].max_interfaces = max_interfaces;
combinations[0].num_different_channels = 1;
combinations[0].beacon_int_infra_match = true;
combinations[0].beacon_int_min_gcd = 100;
combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80);
ar->hw->wiphy->iface_combinations = combinations;
ar->hw->wiphy->n_iface_combinations = 1;
return 0;
}
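/* Extended capabilities advertised per interface type: TWT requester
* support is added for stations and TWT responder support for APs on
* top of the common set.
*/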
static const u8 ath12k_if_types_ext_capa[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
};
static const u8 ath12k_if_types_ext_capa_sta[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
[9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
};
static const u8 ath12k_if_types_ext_capa_ap[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
[9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT,
};
static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
{
.extended_capabilities = ath12k_if_types_ext_capa,
.extended_capabilities_mask = ath12k_if_types_ext_capa,
.extended_capabilities_len = sizeof(ath12k_if_types_ext_capa),
}, {
.iftype = NL80211_IFTYPE_STATION,
.extended_capabilities = ath12k_if_types_ext_capa_sta,
.extended_capabilities_mask = ath12k_if_types_ext_capa_sta,
.extended_capabilities_len =
sizeof(ath12k_if_types_ext_capa_sta),
}, {
.iftype = NL80211_IFTYPE_AP,
.extended_capabilities = ath12k_if_types_ext_capa_ap,
.extended_capabilities_mask = ath12k_if_types_ext_capa_ap,
.extended_capabilities_len =
sizeof(ath12k_if_types_ext_capa_ap),
},
};
static void __ath12k_mac_unregister(struct ath12k *ar)
{
cancel_work_sync(&ar->regd_update_work);
ieee80211_unregister_hw(ar->hw);
idr_for_each(&ar->txmgmt_idr, ath12k_mac_tx_mgmt_pending_free, ar);
idr_destroy(&ar->txmgmt_idr);
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
kfree(ar->hw->wiphy->iface_combinations[0].limits);
kfree(ar->hw->wiphy->iface_combinations);
SET_IEEE80211_DEV(ar->hw, NULL);
}
void ath12k_mac_unregister(struct ath12k_base *ab)
{
struct ath12k *ar;
struct ath12k_pdev *pdev;
int i;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (!ar)
continue;
__ath12k_mac_unregister(ar);
}
}
static int __ath12k_mac_register(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_pdev_cap *cap = &ar->pdev->cap;
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
WLAN_CIPHER_SUITE_AES_CMAC,
WLAN_CIPHER_SUITE_BIP_CMAC_256,
WLAN_CIPHER_SUITE_BIP_GMAC_128,
WLAN_CIPHER_SUITE_BIP_GMAC_256,
WLAN_CIPHER_SUITE_GCMP,
WLAN_CIPHER_SUITE_GCMP_256,
WLAN_CIPHER_SUITE_CCMP_256,
};
int ret;
u32 ht_cap = 0;
ath12k_pdev_caps_update(ar);
SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
SET_IEEE80211_DEV(ar->hw, ab->dev);
ret = ath12k_mac_setup_channels_rates(ar,
cap->supported_bands);
if (ret)
goto err;
ath12k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
ath12k_mac_setup_sband_iftype_data(ar, cap);
ret = ath12k_mac_setup_iface_combinations(ar);
if (ret) {
ath12k_err(ar->ab, "failed to setup interface combinations: %d\n", ret);
goto err_free_channels;
}
ar->hw->wiphy->available_antennas_rx = cap->rx_chain_mask;
ar->hw->wiphy->available_antennas_tx = cap->tx_chain_mask;
ar->hw->wiphy->interface_modes = ab->hw_params->interface_modes;
ieee80211_hw_set(ar->hw, SIGNAL_DBM);
ieee80211_hw_set(ar->hw, SUPPORTS_PS);
ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
ieee80211_hw_set(ar->hw, MFP_CAPABLE);
ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
ieee80211_hw_set(ar->hw, AP_LINK_PS);
ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
if (ht_cap & WMI_HT_CAP_ENABLED) {
ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(ar->hw, SUPPORTS_REORDERING_BUFFER);
ieee80211_hw_set(ar->hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(ar->hw, USES_RSS);
}
ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
/* TODO: Check if HT capability advertised from firmware is different
* for each band for a dual band capable radio. It will be tricky to
* handle it when the ht capability different for each band.
*/
if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS)
ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
ar->hw->max_listen_interval = ATH12K_MAX_HW_LISTEN_INTERVAL;
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
ar->hw->wiphy->max_remain_on_channel_duration = 5000;
ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_AP_SCAN;
ar->max_num_stations = TARGET_NUM_STATIONS;
ar->max_num_peers = TARGET_NUM_PEERS_PDEV;
ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
ar->hw->queues = ATH12K_HW_MAX_QUEUES;
ar->hw->wiphy->tx_queue_len = ATH12K_QUEUE_LEN;
ar->hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1;
ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
ar->hw->vif_data_size = sizeof(struct ath12k_vif);
ar->hw->sta_data_size = sizeof(struct ath12k_sta);
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
ar->hw->wiphy->cipher_suites = cipher_suites;
ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
ar->hw->wiphy->iftype_ext_capab = ath12k_iftypes_ext_capa;
ar->hw->wiphy->num_iftype_ext_capab =
ARRAY_SIZE(ath12k_iftypes_ext_capa);
if (ar->supports_6ghz) {
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_FILS_DISCOVERY);
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP);
}
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_PUNCT);
ath12k_reg_init(ar);
if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
ar->hw->netdev_features = NETIF_F_HW_CSUM;
ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
}
ret = ieee80211_register_hw(ar->hw);
if (ret) {
ath12k_err(ar->ab, "ieee80211 registration failed: %d\n", ret);
goto err_free_if_combs;
}
if (!ab->hw_params->supports_monitor)
/* There's a race between calling ieee80211_register_hw()
* and here where the monitor mode is enabled for a little
* while. But that time is so short that in practice it makes
* no difference in real life.
*/
ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
/* Apply the regd received during initialization */
ret = ath12k_regd_update(ar, true);
if (ret) {
ath12k_err(ar->ab, "ath12k regd update failed: %d\n", ret);
goto err_unregister_hw;
}
return 0;
err_unregister_hw:
ieee80211_unregister_hw(ar->hw);
err_free_if_combs:
kfree(ar->hw->wiphy->iface_combinations[0].limits);
kfree(ar->hw->wiphy->iface_combinations);
err_free_channels:
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
err:
SET_IEEE80211_DEV(ar->hw, NULL);
return ret;
}
int ath12k_mac_register(struct ath12k_base *ab)
{
struct ath12k *ar;
struct ath12k_pdev *pdev;
int i;
int ret;
if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (ab->pdevs_macaddr_valid) {
ether_addr_copy(ar->mac_addr, pdev->mac_addr);
} else {
ether_addr_copy(ar->mac_addr, ab->mac_addr);
ar->mac_addr[4] += i;
}
ret = __ath12k_mac_register(ar);
if (ret)
goto err_cleanup;
init_waitqueue_head(&ar->txmgmt_empty_waitq);
idr_init(&ar->txmgmt_idr);
spin_lock_init(&ar->txmgmt_idr_lock);
}
/* Initialize channel counters frequency value in hertz */
ab->cc_freq_hz = 320000;
ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
return 0;
err_cleanup:
for (i = i - 1; i >= 0; i--) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
__ath12k_mac_unregister(ar);
}
return ret;
}
int ath12k_mac_allocate(struct ath12k_base *ab)
{
struct ieee80211_hw *hw;
struct ath12k *ar;
struct ath12k_pdev *pdev;
int ret;
int i;
if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
hw = ieee80211_alloc_hw(sizeof(struct ath12k), &ath12k_ops);
if (!hw) {
ath12k_warn(ab, "failed to allocate mac80211 hw device\n");
ret = -ENOMEM;
goto err_free_mac;
}
ar = hw->priv;
ar->hw = hw;
ar->ab = ab;
ar->pdev = pdev;
ar->pdev_idx = i;
ar->lmac_id = ath12k_hw_get_mac_from_pdev_id(ab->hw_params, i);
ar->wmi = &ab->wmi_ab.wmi[i];
/* FIXME: wmi[0] is already initialized during attach,
* Should we do this again?
*/
ath12k_wmi_pdev_attach(ab, i);
ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask;
ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask);
ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask);
pdev->ar = ar;
spin_lock_init(&ar->data_lock);
INIT_LIST_HEAD(&ar->arvifs);
INIT_LIST_HEAD(&ar->ppdu_stats_info);
mutex_init(&ar->conf_mutex);
init_completion(&ar->vdev_setup_done);
init_completion(&ar->vdev_delete_done);
init_completion(&ar->peer_assoc_done);
init_completion(&ar->peer_delete_done);
init_completion(&ar->install_key_done);
init_completion(&ar->bss_survey_done);
init_completion(&ar->scan.started);
init_completion(&ar->scan.completed);
INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);
INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
}
return 0;
err_free_mac:
ath12k_mac_destroy(ab);
return ret;
}
void ath12k_mac_destroy(struct ath12k_base *ab)
{
struct ath12k *ar;
struct ath12k_pdev *pdev;
int i;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (!ar)
continue;
ieee80211_free_hw(ar->hw);
pdev->ar = NULL;
}
}
|
linux-master
|
drivers/net/wireless/ath/ath12k/mac.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include "pci.h"
#include "core.h"
#include "hif.h"
#include "mhi.h"
#include "debug.h"
#define ATH12K_PCI_BAR_NUM 0
#define ATH12K_PCI_DMA_MASK 32
#define ATH12K_PCI_IRQ_CE0_OFFSET 3
#define WINDOW_ENABLE_BIT 0x40000000
#define WINDOW_REG_ADDRESS 0x310c
#define WINDOW_VALUE_MASK GENMASK(24, 19)
#define WINDOW_START 0x80000
#define WINDOW_RANGE_MASK GENMASK(18, 0)
#define WINDOW_STATIC_MASK GENMASK(31, 6)
#define TCSR_SOC_HW_VERSION 0x1B00000
#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8)
#define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 4)
/* BAR0 + 4k is always accessible, and no
* need to force wakeup.
* 4K - 32 = 0xFE0
*/
#define ACCESS_ALWAYS_OFF 0xFE0
#define QCN9274_DEVICE_ID 0x1109
#define WCN7850_DEVICE_ID 0x1107
static const struct pci_device_id ath12k_pci_id_table[] = {
{ PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) },
{ PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) },
{0}
};
MODULE_DEVICE_TABLE(pci, ath12k_pci_id_table);
/* TODO: revisit IRQ mapping for new SRNG's */
static const struct ath12k_msi_config ath12k_msi_config[] = {
{
.total_vectors = 16,
.total_users = 3,
.users = (struct ath12k_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
},
},
};
static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
"bhi",
"mhi-er0",
"mhi-er1",
"ce0",
"ce1",
"ce2",
"ce3",
"ce4",
"ce5",
"ce6",
"ce7",
"ce8",
"ce9",
"ce10",
"ce11",
"ce12",
"ce13",
"ce14",
"ce15",
"host2wbm-desc-feed",
"host2reo-re-injection",
"host2reo-command",
"host2rxdma-monitor-ring3",
"host2rxdma-monitor-ring2",
"host2rxdma-monitor-ring1",
"reo2ost-exception",
"wbm2host-rx-release",
"reo2host-status",
"reo2host-destination-ring4",
"reo2host-destination-ring3",
"reo2host-destination-ring2",
"reo2host-destination-ring1",
"rxdma2host-monitor-destination-mac3",
"rxdma2host-monitor-destination-mac2",
"rxdma2host-monitor-destination-mac1",
"ppdu-end-interrupts-mac3",
"ppdu-end-interrupts-mac2",
"ppdu-end-interrupts-mac1",
"rxdma2host-monitor-status-ring-mac3",
"rxdma2host-monitor-status-ring-mac2",
"rxdma2host-monitor-status-ring-mac1",
"host2rxdma-host-buf-ring-mac3",
"host2rxdma-host-buf-ring-mac2",
"host2rxdma-host-buf-ring-mac1",
"rxdma2host-destination-ring-mac3",
"rxdma2host-destination-ring-mac2",
"rxdma2host-destination-ring-mac1",
"host2tcl-input-ring4",
"host2tcl-input-ring3",
"host2tcl-input-ring2",
"host2tcl-input-ring1",
"wbm2host-tx-completions-ring4",
"wbm2host-tx-completions-ring3",
"wbm2host-tx-completions-ring2",
"wbm2host-tx-completions-ring1",
"tcl2host-status-ring",
};
static int ath12k_pci_bus_wake_up(struct ath12k_base *ab)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
}
static void ath12k_pci_bus_release(struct ath12k_base *ab)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
}
static const struct ath12k_pci_ops ath12k_pci_ops_qcn9274 = {
.wakeup = NULL,
.release = NULL,
};
static const struct ath12k_pci_ops ath12k_pci_ops_wcn7850 = {
.wakeup = ath12k_pci_bus_wake_up,
.release = ath12k_pci_bus_release,
};
static void ath12k_pci_select_window(struct ath12k_pci *ab_pci, u32 offset)
{
struct ath12k_base *ab = ab_pci->ab;
u32 window = u32_get_bits(offset, WINDOW_VALUE_MASK);
u32 static_window;
lockdep_assert_held(&ab_pci->window_lock);
/* Preserve the static window configuration and reset only the dynamic window */
static_window = ab_pci->register_window & WINDOW_STATIC_MASK;
window |= static_window;
if (window != ab_pci->register_window) {
iowrite32(WINDOW_ENABLE_BIT | window,
ab->mem + WINDOW_REG_ADDRESS);
ioread32(ab->mem + WINDOW_REG_ADDRESS);
ab_pci->register_window = window;
}
}
static void ath12k_pci_select_static_window(struct ath12k_pci *ab_pci)
{
u32 umac_window = u32_get_bits(HAL_SEQ_WCSS_UMAC_OFFSET, WINDOW_VALUE_MASK);
u32 ce_window = u32_get_bits(HAL_CE_WFSS_CE_REG_BASE, WINDOW_VALUE_MASK);
u32 window;
window = (umac_window << 12) | (ce_window << 6);
spin_lock_bh(&ab_pci->window_lock);
ab_pci->register_window = window;
spin_unlock_bh(&ab_pci->window_lock);
iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS);
}
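/* With static mapping, the UMAC and CE window values are packed into
 * bits 17:12 and 11:6 of the window register (hence the shifts by 12 and
 * 6 above), which WINDOW_STATIC_MASK preserves across later dynamic
 * window selections in ath12k_pci_select_window().
 */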
static u32 ath12k_pci_get_window_start(struct ath12k_base *ab,
u32 offset)
{
u32 window_start;
/* If offset lies within DP register range, use 3rd window */
if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK)
window_start = 3 * WINDOW_START;
/* If offset lies within CE register range, use 2nd window */
else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK)
window_start = 2 * WINDOW_START;
/* If offset lies within the PCI_BAR_WINDOW0 range but outside the
 * PCI_SOC_PCI_REG range, use the 0th window
 */
else if (((offset ^ PCI_BAR_WINDOW0_BASE) < WINDOW_RANGE_MASK) &&
!((offset ^ PCI_SOC_PCI_REG_BASE) < PCI_SOC_RANGE_MASK))
window_start = 0;
else
window_start = WINDOW_START;
return window_start;
}
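/* The (offset ^ BASE) < WINDOW_RANGE_MASK tests above are a compact way
 * of checking that offset and BASE agree in all bits above bit 18, i.e.
 * that the offset falls in the same 512 KiB-aligned region as BASE.
 */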
static void ath12k_pci_soc_global_reset(struct ath12k_base *ab)
{
u32 val, delay;
val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
val |= PCIE_SOC_GLOBAL_RESET_V;
ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
/* TODO: exact time to sleep is uncertain */
delay = 10;
mdelay(delay);
/* Toggle the V bit back, otherwise the chip stays stuck in reset */
val &= ~PCIE_SOC_GLOBAL_RESET_V;
ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
mdelay(delay);
val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
if (val == 0xffffffff)
ath12k_warn(ab, "link down error during global reset\n");
}
static void ath12k_pci_clear_dbg_registers(struct ath12k_base *ab)
{
u32 val;
/* read cookie */
val = ath12k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR);
ath12k_dbg(ab, ATH12K_DBG_PCI, "cookie:0x%x\n", val);
val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);
/* TODO: exact time to sleep is uncertain */
mdelay(10);
/* Write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from continuing
 * the warm boot path and entering a dead loop.
 */
ath12k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0);
mdelay(10);
val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);
/* This is a read-to-clear register; clear it to prevent Q6 from
 * entering the wrong code path.
 */
val = ath12k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
ath12k_dbg(ab, ATH12K_DBG_PCI, "soc reset cause:%d\n", val);
}
static void ath12k_pci_enable_ltssm(struct ath12k_base *ab)
{
u32 val;
int i;
val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
/* The PCIe link seems very unstable after the hot reset */
for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
if (val == 0xffffffff)
mdelay(5);
ath12k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
}
ath12k_dbg(ab, ATH12K_DBG_PCI, "pci ltssm 0x%x\n", val);
val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
val |= GCC_GCC_PCIE_HOT_RST_VAL;
ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
ath12k_dbg(ab, ATH12K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val);
mdelay(5);
}
static void ath12k_pci_clear_all_intrs(struct ath12k_base *ab)
{
/* This is a workaround for a PCIe hot reset issue: when the target
 * receives a hot reset it leaves an interrupt asserted, so when SBL
 * is downloaded again it unmasks the stale interrupt, receives it
 * and crashes immediately. Clear all interrupts first.
 */
ath12k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
}
static void ath12k_pci_set_wlaon_pwr_ctrl(struct ath12k_base *ab)
{
u32 val;
val = ath12k_pci_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
ath12k_pci_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
}
static void ath12k_pci_force_wake(struct ath12k_base *ab)
{
ath12k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
mdelay(5);
}
static void ath12k_pci_sw_reset(struct ath12k_base *ab, bool power_on)
{
if (power_on) {
ath12k_pci_enable_ltssm(ab);
ath12k_pci_clear_all_intrs(ab);
ath12k_pci_set_wlaon_pwr_ctrl(ab);
}
ath12k_mhi_clear_vector(ab);
ath12k_pci_clear_dbg_registers(ab);
ath12k_pci_soc_global_reset(ab);
ath12k_mhi_set_mhictrl_reset(ab);
}
static void ath12k_pci_free_ext_irq(struct ath12k_base *ab)
{
int i, j;
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
for (j = 0; j < irq_grp->num_irq; j++)
free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
netif_napi_del(&irq_grp->napi);
}
}
static void ath12k_pci_free_irq(struct ath12k_base *ab)
{
int i, irq_idx;
for (i = 0; i < ab->hw_params->ce_count; i++) {
if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
}
ath12k_pci_free_ext_irq(ab);
}
static void ath12k_pci_ce_irq_enable(struct ath12k_base *ab, u16 ce_id)
{
u32 irq_idx;
irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
enable_irq(ab->irq_num[irq_idx]);
}
static void ath12k_pci_ce_irq_disable(struct ath12k_base *ab, u16 ce_id)
{
u32 irq_idx;
irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
disable_irq_nosync(ab->irq_num[irq_idx]);
}
static void ath12k_pci_ce_irqs_disable(struct ath12k_base *ab)
{
int i;
for (i = 0; i < ab->hw_params->ce_count; i++) {
if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
ath12k_pci_ce_irq_disable(ab, i);
}
}
static void ath12k_pci_sync_ce_irqs(struct ath12k_base *ab)
{
int i;
int irq_idx;
for (i = 0; i < ab->hw_params->ce_count; i++) {
if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
synchronize_irq(ab->irq_num[irq_idx]);
}
}
static void ath12k_pci_ce_tasklet(struct tasklet_struct *t)
{
struct ath12k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
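/* The CE interrupt was disabled in the hard-irq handler; service the
 * engine from tasklet context, then re-enable it.
 */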
ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
ath12k_pci_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
}
static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg)
{
struct ath12k_ce_pipe *ce_pipe = arg;
/* last interrupt received for this CE */
ce_pipe->timestamp = jiffies;
ath12k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
tasklet_schedule(&ce_pipe->intr_tq);
return IRQ_HANDLED;
}
static void ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
{
int i;
for (i = 0; i < irq_grp->num_irq; i++)
disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
static void __ath12k_pci_ext_irq_disable(struct ath12k_base *sc)
{
int i;
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
ath12k_pci_ext_grp_disable(irq_grp);
napi_synchronize(&irq_grp->napi);
napi_disable(&irq_grp->napi);
}
}
static void ath12k_pci_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp)
{
int i;
for (i = 0; i < irq_grp->num_irq; i++)
enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
static void ath12k_pci_sync_ext_irqs(struct ath12k_base *ab)
{
int i, j, irq_idx;
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
for (j = 0; j < irq_grp->num_irq; j++) {
irq_idx = irq_grp->irqs[j];
synchronize_irq(ab->irq_num[irq_idx]);
}
}
}
static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
struct ath12k_ext_irq_grp *irq_grp = container_of(napi,
struct ath12k_ext_irq_grp,
napi);
struct ath12k_base *ab = irq_grp->ab;
int work_done;
work_done = ath12k_dp_service_srng(ab, irq_grp, budget);
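/* NAPI contract: report at most the budget, and re-arm the group's
 * interrupts only when the poll consumed less than the full budget;
 * otherwise stay in polling mode.
 */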
if (work_done < budget) {
napi_complete_done(napi, work_done);
ath12k_pci_ext_grp_enable(irq_grp);
}
if (work_done > budget)
work_done = budget;
return work_done;
}
static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg)
{
struct ath12k_ext_irq_grp *irq_grp = arg;
ath12k_dbg(irq_grp->ab, ATH12K_DBG_PCI, "ext irq:%d\n", irq);
/* last interrupt received for this group */
irq_grp->timestamp = jiffies;
ath12k_pci_ext_grp_disable(irq_grp);
napi_schedule(&irq_grp->napi);
return IRQ_HANDLED;
}
static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
{
int i, j, ret, num_vectors = 0;
u32 user_base_data = 0, base_vector = 0, base_idx;
base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX;
ret = ath12k_pci_get_user_msi_assignment(ab, "DP",
&num_vectors,
&user_base_data,
&base_vector);
if (ret < 0)
return ret;
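/* Each external IRQ group gets one MSI vector from the "DP" user:
 * group i maps to vector (i % num_vectors) + base_vector, so groups
 * share vectors round-robin when groups outnumber DP vectors.
 */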
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
u32 num_irq = 0;
irq_grp->ab = ab;
irq_grp->grp_id = i;
init_dummy_netdev(&irq_grp->napi_ndev);
netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
ath12k_pci_ext_grp_napi_poll);
if (ab->hw_params->ring_mask->tx[i] ||
ab->hw_params->ring_mask->rx[i] ||
ab->hw_params->ring_mask->rx_err[i] ||
ab->hw_params->ring_mask->rx_wbm_rel[i] ||
ab->hw_params->ring_mask->reo_status[i] ||
ab->hw_params->ring_mask->host2rxdma[i] ||
ab->hw_params->ring_mask->rx_mon_dest[i]) {
num_irq = 1;
}
irq_grp->num_irq = num_irq;
irq_grp->irqs[0] = base_idx + i;
for (j = 0; j < irq_grp->num_irq; j++) {
int irq_idx = irq_grp->irqs[j];
int vector = (i % num_vectors) + base_vector;
int irq = ath12k_pci_get_msi_irq(ab->dev, vector);
ab->irq_num[irq_idx] = irq;
ath12k_dbg(ab, ATH12K_DBG_PCI,
"irq:%d group:%d\n", irq, i);
irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
ret = request_irq(irq, ath12k_pci_ext_interrupt_handler,
IRQF_SHARED,
"DP_EXT_IRQ", irq_grp);
if (ret) {
ath12k_err(ab, "failed request irq %d: %d\n",
vector, ret);
return ret;
}
disable_irq_nosync(ab->irq_num[irq_idx]);
}
}
return 0;
}
static int ath12k_pci_config_irq(struct ath12k_base *ab)
{
struct ath12k_ce_pipe *ce_pipe;
u32 msi_data_start;
u32 msi_data_count, msi_data_idx;
u32 msi_irq_start;
unsigned int msi_data;
int irq, i, ret, irq_idx;
ret = ath12k_pci_get_user_msi_assignment(ab,
"CE", &msi_data_count,
&msi_data_start, &msi_irq_start);
if (ret)
return ret;
/* Configure CE irqs */
for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
irq = ath12k_pci_get_msi_irq(ab->dev, msi_data);
ce_pipe = &ab->ce.ce_pipe[i];
irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
tasklet_setup(&ce_pipe->intr_tq, ath12k_pci_ce_tasklet);
ret = request_irq(irq, ath12k_pci_ce_interrupt_handler,
IRQF_SHARED, irq_name[irq_idx],
ce_pipe);
if (ret) {
ath12k_err(ab, "failed to request irq %d: %d\n",
irq_idx, ret);
return ret;
}
ab->irq_num[irq_idx] = irq;
msi_data_idx++;
ath12k_pci_ce_irq_disable(ab, i);
}
ret = ath12k_pci_ext_irq_config(ab);
if (ret)
return ret;
return 0;
}
static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab)
{
struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
cfg->tgt_ce = ab->hw_params->target_ce_config;
cfg->tgt_ce_len = ab->hw_params->target_ce_count;
cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
}
static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab)
{
int i;
for (i = 0; i < ab->hw_params->ce_count; i++) {
if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
ath12k_pci_ce_irq_enable(ab, i);
}
}
static void ath12k_pci_msi_config(struct ath12k_pci *ab_pci, bool enable)
{
struct pci_dev *dev = ab_pci->pdev;
u16 control;
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
if (enable)
control |= PCI_MSI_FLAGS_ENABLE;
else
control &= ~PCI_MSI_FLAGS_ENABLE;
pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}
static void ath12k_pci_msi_enable(struct ath12k_pci *ab_pci)
{
ath12k_pci_msi_config(ab_pci, true);
}
static void ath12k_pci_msi_disable(struct ath12k_pci *ab_pci)
{
ath12k_pci_msi_config(ab_pci, false);
}
static int ath12k_pci_msi_alloc(struct ath12k_pci *ab_pci)
{
struct ath12k_base *ab = ab_pci->ab;
const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
struct msi_desc *msi_desc;
int num_vectors;
int ret;
num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
msi_config->total_vectors,
msi_config->total_vectors,
PCI_IRQ_MSI);
if (num_vectors != msi_config->total_vectors) {
ath12k_err(ab, "failed to get %d MSI vectors, only %d available",
msi_config->total_vectors, num_vectors);
if (num_vectors >= 0)
return -EINVAL;
else
return num_vectors;
}
ath12k_pci_msi_disable(ab_pci);
msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
if (!msi_desc) {
ath12k_err(ab, "msi_desc is NULL!\n");
ret = -EINVAL;
goto free_msi_vector;
}
ab_pci->msi_ep_base_data = msi_desc->msg.data;
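/* msi_ep_base_data is the MSI data value of vector 0; the per-user
 * vector numbers reported to firmware are offsets added to it in
 * ath12k_pci_get_user_msi_assignment().
 */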
if (msi_desc->pci.msi_attrib.is_64)
set_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags);
ath12k_dbg(ab, ATH12K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data);
return 0;
free_msi_vector:
pci_free_irq_vectors(ab_pci->pdev);
return ret;
}
static void ath12k_pci_msi_free(struct ath12k_pci *ab_pci)
{
pci_free_irq_vectors(ab_pci->pdev);
}
static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev)
{
struct ath12k_base *ab = ab_pci->ab;
u16 device_id;
int ret = 0;
pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
if (device_id != ab_pci->dev_id) {
ath12k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
device_id, ab_pci->dev_id);
ret = -EIO;
goto out;
}
ret = pci_assign_resource(pdev, ATH12K_PCI_BAR_NUM);
if (ret) {
ath12k_err(ab, "failed to assign pci resource: %d\n", ret);
goto out;
}
ret = pci_enable_device(pdev);
if (ret) {
ath12k_err(ab, "failed to enable pci device: %d\n", ret);
goto out;
}
ret = pci_request_region(pdev, ATH12K_PCI_BAR_NUM, "ath12k_pci");
if (ret) {
ath12k_err(ab, "failed to request pci region: %d\n", ret);
goto disable_device;
}
ret = dma_set_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(ATH12K_PCI_DMA_MASK));
if (ret) {
ath12k_err(ab, "failed to set pci dma mask to %d: %d\n",
ATH12K_PCI_DMA_MASK, ret);
goto release_region;
}
pci_set_master(pdev);
ab->mem_len = pci_resource_len(pdev, ATH12K_PCI_BAR_NUM);
ab->mem = pci_iomap(pdev, ATH12K_PCI_BAR_NUM, 0);
if (!ab->mem) {
ath12k_err(ab, "failed to map pci bar %d\n", ATH12K_PCI_BAR_NUM);
ret = -EIO;
goto release_region;
}
ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem);
return 0;
release_region:
pci_release_region(pdev, ATH12K_PCI_BAR_NUM);
disable_device:
pci_disable_device(pdev);
out:
return ret;
}
static void ath12k_pci_free_region(struct ath12k_pci *ab_pci)
{
struct ath12k_base *ab = ab_pci->ab;
struct pci_dev *pci_dev = ab_pci->pdev;
pci_iounmap(pci_dev, ab->mem);
ab->mem = NULL;
pci_release_region(pci_dev, ATH12K_PCI_BAR_NUM);
if (pci_is_enabled(pci_dev))
pci_disable_device(pci_dev);
}
static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci)
{
struct ath12k_base *ab = ab_pci->ab;
pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
&ab_pci->link_ctl);
ath12k_dbg(ab, ATH12K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n",
ab_pci->link_ctl,
u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));
/* disable L0s and L1 */
pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_ASPMC);
set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags);
}
static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
{
if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_ASPMC,
ab_pci->link_ctl &
PCI_EXP_LNKCTL_ASPMC);
}
static void ath12k_pci_kill_tasklets(struct ath12k_base *ab)
{
int i;
for (i = 0; i < ab->hw_params->ce_count; i++) {
struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
tasklet_kill(&ce_pipe->intr_tq);
}
}
static void ath12k_pci_ce_irq_disable_sync(struct ath12k_base *ab)
{
ath12k_pci_ce_irqs_disable(ab);
ath12k_pci_sync_ce_irqs(ab);
ath12k_pci_kill_tasklets(ab);
}
int ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe)
{
const struct service_to_pipe *entry;
bool ul_set = false, dl_set = false;
int i;
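/* Pipe directions are from the host's point of view: PIPEDIR_OUT
 * entries feed the uplink (host-to-target) pipe, PIPEDIR_IN the
 * downlink (target-to-host) pipe, and PIPEDIR_INOUT both.
 */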
for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) {
entry = &ab->hw_params->svc_to_ce_map[i];
if (__le32_to_cpu(entry->service_id) != service_id)
continue;
switch (__le32_to_cpu(entry->pipedir)) {
case PIPEDIR_NONE:
break;
case PIPEDIR_IN:
WARN_ON(dl_set);
*dl_pipe = __le32_to_cpu(entry->pipenum);
dl_set = true;
break;
case PIPEDIR_OUT:
WARN_ON(ul_set);
*ul_pipe = __le32_to_cpu(entry->pipenum);
ul_set = true;
break;
case PIPEDIR_INOUT:
WARN_ON(dl_set);
WARN_ON(ul_set);
*dl_pipe = __le32_to_cpu(entry->pipenum);
*ul_pipe = __le32_to_cpu(entry->pipenum);
dl_set = true;
ul_set = true;
break;
}
}
if (WARN_ON(!ul_set || !dl_set))
return -ENOENT;
return 0;
}
int ath12k_pci_get_msi_irq(struct device *dev, unsigned int vector)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
return pci_irq_vector(pci_dev, vector);
}
int ath12k_pci_get_user_msi_assignment(struct ath12k_base *ab, char *user_name,
int *num_vectors, u32 *user_base_data,
u32 *base_vector)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
int idx;
for (idx = 0; idx < msi_config->total_users; idx++) {
if (strcmp(user_name, msi_config->users[idx].name) == 0) {
*num_vectors = msi_config->users[idx].num_vectors;
*user_base_data = msi_config->users[idx].base_vector
+ ab_pci->msi_ep_base_data;
*base_vector = msi_config->users[idx].base_vector;
ath12k_dbg(ab, ATH12K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
user_name, *num_vectors, *user_base_data,
*base_vector);
return 0;
}
}
ath12k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
return -EINVAL;
}
void ath12k_pci_get_msi_address(struct ath12k_base *ab, u32 *msi_addr_lo,
u32 *msi_addr_hi)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
struct pci_dev *pci_dev = to_pci_dev(ab->dev);
pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
msi_addr_lo);
if (test_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) {
pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
msi_addr_hi);
} else {
*msi_addr_hi = 0;
}
}
void ath12k_pci_get_ce_msi_idx(struct ath12k_base *ab, u32 ce_id,
u32 *msi_idx)
{
u32 i, msi_data_idx;
for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
if (ce_id == i)
break;
msi_data_idx++;
}
*msi_idx = msi_data_idx;
}
void ath12k_pci_hif_ce_irq_enable(struct ath12k_base *ab)
{
ath12k_pci_ce_irqs_enable(ab);
}
void ath12k_pci_hif_ce_irq_disable(struct ath12k_base *ab)
{
ath12k_pci_ce_irq_disable_sync(ab);
}
void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
{
int i;
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
napi_enable(&irq_grp->napi);
ath12k_pci_ext_grp_enable(irq_grp);
}
}
void ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
__ath12k_pci_ext_irq_disable(ab);
ath12k_pci_sync_ext_irqs(ab);
}
int ath12k_pci_hif_suspend(struct ath12k_base *ab)
{
struct ath12k_pci *ar_pci = ath12k_pci_priv(ab);
ath12k_mhi_suspend(ar_pci);
return 0;
}
int ath12k_pci_hif_resume(struct ath12k_base *ab)
{
struct ath12k_pci *ar_pci = ath12k_pci_priv(ab);
ath12k_mhi_resume(ar_pci);
return 0;
}
void ath12k_pci_stop(struct ath12k_base *ab)
{
ath12k_pci_ce_irq_disable_sync(ab);
ath12k_ce_cleanup_pipes(ab);
}
int ath12k_pci_start(struct ath12k_base *ab)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
set_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
ath12k_pci_aspm_restore(ab_pci);
ath12k_pci_ce_irqs_enable(ab);
ath12k_ce_rx_post_buf(ab);
return 0;
}
u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
u32 val, window_start;
int ret = 0;
/* Offsets at or beyond BAR0 + 4K - 32 may need an MHI wakeup
 * before they can be accessed.
 */
if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup)
ret = ab_pci->pci_ops->wakeup(ab);
if (offset < WINDOW_START) {
val = ioread32(ab->mem + offset);
} else {
if (ab->static_window_map)
window_start = ath12k_pci_get_window_start(ab, offset);
else
window_start = WINDOW_START;
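/* window_start == WINDOW_START denotes the dynamic window: take
 * window_lock and reprogram the window register. Any other value is
 * a statically mapped region that can be accessed without
 * reprogramming.
 */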
if (window_start == WINDOW_START) {
spin_lock_bh(&ab_pci->window_lock);
ath12k_pci_select_window(ab_pci, offset);
val = ioread32(ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
spin_unlock_bh(&ab_pci->window_lock);
} else {
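/* Window 0 aliases the MHI register region at the start of the BAR,
 * so rebase MHI offsets relative to PCI_MHIREGLEN_REG before
 * masking.
 */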
if ((!window_start) &&
(offset >= PCI_MHIREGLEN_REG &&
offset <= PCI_MHI_REGION_END))
offset = offset - PCI_MHIREGLEN_REG;
val = ioread32(ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
}
}
if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release &&
!ret)
ab_pci->pci_ops->release(ab);
return val;
}
void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
u32 window_start;
int ret = 0;
/* Offsets at or beyond BAR0 + 4K - 32 may need an MHI wakeup
 * before they can be accessed.
 */
if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup)
ret = ab_pci->pci_ops->wakeup(ab);
if (offset < WINDOW_START) {
iowrite32(value, ab->mem + offset);
} else {
if (ab->static_window_map)
window_start = ath12k_pci_get_window_start(ab, offset);
else
window_start = WINDOW_START;
if (window_start == WINDOW_START) {
spin_lock_bh(&ab_pci->window_lock);
ath12k_pci_select_window(ab_pci, offset);
iowrite32(value, ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
spin_unlock_bh(&ab_pci->window_lock);
} else {
if ((!window_start) &&
(offset >= PCI_MHIREGLEN_REG &&
offset <= PCI_MHI_REGION_END))
offset = offset - PCI_MHIREGLEN_REG;
iowrite32(value, ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
}
}
if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release &&
!ret)
ab_pci->pci_ops->release(ab);
}
int ath12k_pci_power_up(struct ath12k_base *ab)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
int ret;
ab_pci->register_window = 0;
clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
ath12k_pci_sw_reset(ab_pci->ab, true);
/* Disable ASPM during firmware download due to problems switching
* to AMSS state.
*/
ath12k_pci_aspm_disable(ab_pci);
ath12k_pci_msi_enable(ab_pci);
ret = ath12k_mhi_start(ab_pci);
if (ret) {
ath12k_err(ab, "failed to start mhi: %d\n", ret);
return ret;
}
if (ab->static_window_map)
ath12k_pci_select_static_window(ab_pci);
return 0;
}
void ath12k_pci_power_down(struct ath12k_base *ab)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
/* restore aspm in case firmware bootup fails */
ath12k_pci_aspm_restore(ab_pci);
ath12k_pci_force_wake(ab_pci->ab);
ath12k_pci_msi_disable(ab_pci);
ath12k_mhi_stop(ab_pci);
clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
ath12k_pci_sw_reset(ab_pci->ab, false);
}
static const struct ath12k_hif_ops ath12k_pci_hif_ops = {
.start = ath12k_pci_start,
.stop = ath12k_pci_stop,
.read32 = ath12k_pci_read32,
.write32 = ath12k_pci_write32,
.power_down = ath12k_pci_power_down,
.power_up = ath12k_pci_power_up,
.suspend = ath12k_pci_hif_suspend,
.resume = ath12k_pci_hif_resume,
.irq_enable = ath12k_pci_ext_irq_enable,
.irq_disable = ath12k_pci_ext_irq_disable,
.get_msi_address = ath12k_pci_get_msi_address,
.get_user_msi_vector = ath12k_pci_get_user_msi_assignment,
.map_service_to_pipe = ath12k_pci_map_service_to_pipe,
.ce_irq_enable = ath12k_pci_hif_ce_irq_enable,
.ce_irq_disable = ath12k_pci_hif_ce_irq_disable,
.get_ce_msi_idx = ath12k_pci_get_ce_msi_idx,
};
static
void ath12k_pci_read_hw_version(struct ath12k_base *ab, u32 *major, u32 *minor)
{
u32 soc_hw_version;
soc_hw_version = ath12k_pci_read32(ab, TCSR_SOC_HW_VERSION);
*major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
soc_hw_version);
*minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
soc_hw_version);
ath12k_dbg(ab, ATH12K_DBG_PCI,
"pci tcsr_soc_hw_version major %d minor %d\n",
*major, *minor);
}
static int ath12k_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_dev)
{
struct ath12k_base *ab;
struct ath12k_pci *ab_pci;
u32 soc_hw_version_major, soc_hw_version_minor;
int ret;
ab = ath12k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH12K_BUS_PCI);
if (!ab) {
dev_err(&pdev->dev, "failed to allocate ath12k base\n");
return -ENOMEM;
}
ab->dev = &pdev->dev;
pci_set_drvdata(pdev, ab);
ab_pci = ath12k_pci_priv(ab);
ab_pci->dev_id = pci_dev->device;
ab_pci->ab = ab;
ab_pci->pdev = pdev;
ab->hif.ops = &ath12k_pci_hif_ops;
spin_lock_init(&ab_pci->window_lock);
ret = ath12k_pci_claim(ab_pci, pdev);
if (ret) {
ath12k_err(ab, "failed to claim device: %d\n", ret);
goto err_free_core;
}
switch (pci_dev->device) {
case QCN9274_DEVICE_ID:
ab_pci->msi_config = &ath12k_msi_config[0];
ab->static_window_map = true;
ab_pci->pci_ops = &ath12k_pci_ops_qcn9274;
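/* QCN9274 uses statically mapped register windows and needs no bus
 * wakeup ops; WCN7850 relies on the dynamic window and MHI get/put
 * for wakeup (see ath12k_pci_ops_wcn7850).
 */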
ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
switch (soc_hw_version_major) {
case ATH12K_PCI_SOC_HW_VERSION_2:
ab->hw_rev = ATH12K_HW_QCN9274_HW20;
break;
case ATH12K_PCI_SOC_HW_VERSION_1:
ab->hw_rev = ATH12K_HW_QCN9274_HW10;
break;
default:
dev_err(&pdev->dev,
"Unknown hardware version found for QCN9274: 0x%x\n",
soc_hw_version_major);
ret = -EOPNOTSUPP;
goto err_pci_free_region;
}
break;
case WCN7850_DEVICE_ID:
ab_pci->msi_config = &ath12k_msi_config[0];
ab->static_window_map = false;
ab_pci->pci_ops = &ath12k_pci_ops_wcn7850;
ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
switch (soc_hw_version_major) {
case ATH12K_PCI_SOC_HW_VERSION_2:
ab->hw_rev = ATH12K_HW_WCN7850_HW20;
break;
default:
dev_err(&pdev->dev,
"Unknown hardware version found for WCN7850: 0x%x\n",
soc_hw_version_major);
ret = -EOPNOTSUPP;
goto err_pci_free_region;
}
break;
default:
dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
pci_dev->device);
ret = -EOPNOTSUPP;
goto err_pci_free_region;
}
ret = ath12k_pci_msi_alloc(ab_pci);
if (ret) {
ath12k_err(ab, "failed to alloc msi: %d\n", ret);
goto err_pci_free_region;
}
ret = ath12k_core_pre_init(ab);
if (ret)
goto err_pci_msi_free;
ret = ath12k_mhi_register(ab_pci);
if (ret) {
ath12k_err(ab, "failed to register mhi: %d\n", ret);
goto err_pci_msi_free;
}
ret = ath12k_hal_srng_init(ab);
if (ret)
goto err_mhi_unregister;
ret = ath12k_ce_alloc_pipes(ab);
if (ret) {
ath12k_err(ab, "failed to allocate ce pipes: %d\n", ret);
goto err_hal_srng_deinit;
}
ath12k_pci_init_qmi_ce_config(ab);
ret = ath12k_pci_config_irq(ab);
if (ret) {
ath12k_err(ab, "failed to config irq: %d\n", ret);
goto err_ce_free;
}
ret = ath12k_core_init(ab);
if (ret) {
ath12k_err(ab, "failed to init core: %d\n", ret);
goto err_free_irq;
}
return 0;
err_free_irq:
ath12k_pci_free_irq(ab);
err_ce_free:
ath12k_ce_free_pipes(ab);
err_hal_srng_deinit:
ath12k_hal_srng_deinit(ab);
err_mhi_unregister:
ath12k_mhi_unregister(ab_pci);
err_pci_msi_free:
ath12k_pci_msi_free(ab_pci);
err_pci_free_region:
ath12k_pci_free_region(ab_pci);
err_free_core:
ath12k_core_free(ab);
return ret;
}
static void ath12k_pci_remove(struct pci_dev *pdev)
{
struct ath12k_base *ab = pci_get_drvdata(pdev);
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
ath12k_pci_power_down(ab);
ath12k_qmi_deinit_service(ab);
goto qmi_fail;
}
set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags);
cancel_work_sync(&ab->reset_work);
ath12k_core_deinit(ab);
qmi_fail:
ath12k_mhi_unregister(ab_pci);
ath12k_pci_free_irq(ab);
ath12k_pci_msi_free(ab_pci);
ath12k_pci_free_region(ab_pci);
ath12k_hal_srng_deinit(ab);
ath12k_ce_free_pipes(ab);
ath12k_core_free(ab);
}
static void ath12k_pci_shutdown(struct pci_dev *pdev)
{
struct ath12k_base *ab = pci_get_drvdata(pdev);
ath12k_pci_power_down(ab);
}
static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev)
{
struct ath12k_base *ab = dev_get_drvdata(dev);
int ret;
ret = ath12k_core_suspend(ab);
if (ret)
ath12k_warn(ab, "failed to suspend core: %d\n", ret);
return ret;
}
static __maybe_unused int ath12k_pci_pm_resume(struct device *dev)
{
struct ath12k_base *ab = dev_get_drvdata(dev);
int ret;
ret = ath12k_core_resume(ab);
if (ret)
ath12k_warn(ab, "failed to resume core: %d\n", ret);
return ret;
}
static SIMPLE_DEV_PM_OPS(ath12k_pci_pm_ops,
ath12k_pci_pm_suspend,
ath12k_pci_pm_resume);
static struct pci_driver ath12k_pci_driver = {
.name = "ath12k_pci",
.id_table = ath12k_pci_id_table,
.probe = ath12k_pci_probe,
.remove = ath12k_pci_remove,
.shutdown = ath12k_pci_shutdown,
.driver.pm = &ath12k_pci_pm_ops,
};
static int ath12k_pci_init(void)
{
int ret;
ret = pci_register_driver(&ath12k_pci_driver);
if (ret) {
pr_err("failed to register ath12k pci driver: %d\n",
ret);
return ret;
}
return 0;
}
module_init(ath12k_pci_init);
static void ath12k_pci_exit(void)
{
pci_unregister_driver(&ath12k_pci_driver);
}
module_exit(ath12k_pci_exit);
MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11be WLAN devices");
MODULE_LICENSE("Dual BSD/GPL");
|
linux-master
|
drivers/net/wireless/ath/ath12k/pci.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "dp_tx.h"
#include "debug.h"
#include "hw.h"
static enum hal_tcl_encap_type
ath12k_dp_tx_get_encap_type(struct ath12k_vif *arvif, struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath12k_base *ab = arvif->ar->ab;
if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
return HAL_TCL_ENCAP_TYPE_RAW;
if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
return HAL_TCL_ENCAP_TYPE_ETHERNET;
return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
}
static void ath12k_dp_tx_encap_nwifi(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
u8 *qos_ctl;
if (!ieee80211_is_data_qos(hdr->frame_control))
return;
qos_ctl = ieee80211_get_qos_ctl(hdr);
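/* Native-wifi encap carries no QoS control field: shift the bytes
 * that precede it forward by IEEE80211_QOS_CTL_LEN, pull the now
 * duplicated two bytes from the head, and clear the QoS-data subtype
 * bit so the header matches its shortened layout.
 */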
memmove(skb->data + IEEE80211_QOS_CTL_LEN,
skb->data, (void *)qos_ctl - (void *)skb->data);
skb_pull(skb, IEEE80211_QOS_CTL_LEN);
hdr = (void *)skb->data;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
}
static u8 ath12k_dp_tx_get_tid(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
struct ath12k_skb_cb *cb = ATH12K_SKB_CB(skb);
if (cb->flags & ATH12K_SKB_HW_80211_ENCAP)
return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
else if (!ieee80211_is_data_qos(hdr->frame_control))
return HAL_DESC_REO_NON_QOS_TID;
else
return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
}
enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher)
{
switch (cipher) {
case WLAN_CIPHER_SUITE_WEP40:
return HAL_ENCRYPT_TYPE_WEP_40;
case WLAN_CIPHER_SUITE_WEP104:
return HAL_ENCRYPT_TYPE_WEP_104;
case WLAN_CIPHER_SUITE_TKIP:
return HAL_ENCRYPT_TYPE_TKIP_MIC;
case WLAN_CIPHER_SUITE_CCMP:
return HAL_ENCRYPT_TYPE_CCMP_128;
case WLAN_CIPHER_SUITE_CCMP_256:
return HAL_ENCRYPT_TYPE_CCMP_256;
case WLAN_CIPHER_SUITE_GCMP:
return HAL_ENCRYPT_TYPE_GCMP_128;
case WLAN_CIPHER_SUITE_GCMP_256:
return HAL_ENCRYPT_TYPE_AES_GCMP_256;
default:
return HAL_ENCRYPT_TYPE_OPEN;
}
}
static void ath12k_dp_tx_release_txbuf(struct ath12k_dp *dp,
struct ath12k_tx_desc_info *tx_desc,
u8 pool_id)
{
spin_lock_bh(&dp->tx_desc_lock[pool_id]);
list_move_tail(&tx_desc->list, &dp->tx_desc_free_list[pool_id]);
spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
}
static struct ath12k_tx_desc_info *ath12k_dp_tx_assign_buffer(struct ath12k_dp *dp,
u8 pool_id)
{
struct ath12k_tx_desc_info *desc;
spin_lock_bh(&dp->tx_desc_lock[pool_id]);
desc = list_first_entry_or_null(&dp->tx_desc_free_list[pool_id],
struct ath12k_tx_desc_info,
list);
if (!desc) {
spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
ath12k_warn(dp->ab, "failed to allocate data Tx buffer\n");
return NULL;
}
list_move_tail(&desc->list, &dp->tx_desc_used_list[pool_id]);
spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
return desc;
}
static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab, void *cmd,
struct hal_tx_info *ti)
{
struct hal_tx_msdu_ext_desc *tcl_ext_cmd = (struct hal_tx_msdu_ext_desc *)cmd;
tcl_ext_cmd->info0 = le32_encode_bits(ti->paddr,
HAL_TX_MSDU_EXT_INFO0_BUF_PTR_LO);
tcl_ext_cmd->info1 = le32_encode_bits(0x0,
HAL_TX_MSDU_EXT_INFO1_BUF_PTR_HI) |
le32_encode_bits(ti->data_len,
HAL_TX_MSDU_EXT_INFO1_BUF_LEN);
/* OR into info1 so the buffer length encoded above is preserved */
tcl_ext_cmd->info1 |= le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
le32_encode_bits(ti->encap_type,
HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) |
le32_encode_bits(ti->encrypt_type,
HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE);
}
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
struct sk_buff *skb)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = &ab->dp;
struct hal_tx_info ti = {0};
struct ath12k_tx_desc_info *tx_desc;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
struct hal_tcl_data_cmd *hal_tcl_desc;
struct hal_tx_msdu_ext_desc *msg;
struct sk_buff *skb_ext_desc;
struct hal_srng *tcl_ring;
struct ieee80211_hdr *hdr = (void *)skb->data;
struct dp_tx_ring *tx_ring;
u8 pool_id;
u8 hal_ring_id;
int ret;
u8 ring_selector, ring_map = 0;
bool tcl_ring_retry;
bool msdu_ext_desc = false;
if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
!ieee80211_is_data(hdr->frame_control))
return -ENOTSUPP;
pool_id = skb_get_queue_mapping(skb) & (ATH12K_HW_MAX_QUEUES - 1);
/* Let the default ring selection be based on the current processor
 * number: one of the 3 TCL rings is selected based on
 * smp_processor_id(). In case that ring is full or busy, we resort
 * to the other available rings. If all rings are full, we drop the
 * packet.
 * TODO: Add throttling logic when all rings are full
 */
ring_selector = ab->hw_params->hw_ops->get_ring_selector(skb);
tcl_ring_sel:
tcl_ring_retry = false;
ti.ring_id = ring_selector % ab->hw_params->max_tx_ring;
ring_map |= BIT(ti.ring_id);
ti.rbm_id = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[ti.ring_id].rbm_id;
tx_ring = &dp->tx_ring[ti.ring_id];
tx_desc = ath12k_dp_tx_assign_buffer(dp, pool_id);
if (!tx_desc)
return -ENOMEM;
ti.bank_id = arvif->bank_id;
ti.meta_data_flags = arvif->tcl_metadata;
if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) {
if (skb_cb->flags & ATH12K_SKB_CIPHER_SET) {
ti.encrypt_type =
ath12k_dp_tx_get_encrypt_type(skb_cb->cipher);
if (ieee80211_has_protected(hdr->frame_control))
skb_put(skb, IEEE80211_CCMP_MIC_LEN);
} else {
ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
}
msdu_ext_desc = true;
}
ti.encap_type = ath12k_dp_tx_get_encap_type(arvif, skb);
ti.addr_search_flags = arvif->hal_addr_search_flags;
ti.search_type = arvif->search_type;
ti.type = HAL_TCL_DESC_TYPE_BUFFER;
ti.pkt_offset = 0;
ti.lmac_id = ar->lmac_id;
ti.vdev_id = arvif->vdev_id;
ti.bss_ast_hash = arvif->ast_hash;
ti.bss_ast_idx = arvif->ast_idx;
ti.dscp_tid_tbl_idx = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL &&
ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW) {
ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_IP4_CKSUM_EN) |
u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP4_CKSUM_EN) |
u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP6_CKSUM_EN) |
u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP4_CKSUM_EN) |
u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP6_CKSUM_EN);
}
ti.flags1 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO3_TID_OVERWRITE);
ti.tid = ath12k_dp_tx_get_tid(skb);
switch (ti.encap_type) {
case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
ath12k_dp_tx_encap_nwifi(skb);
break;
case HAL_TCL_ENCAP_TYPE_RAW:
if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
ret = -EINVAL;
goto fail_remove_tx_buf;
}
break;
case HAL_TCL_ENCAP_TYPE_ETHERNET:
/* no need to encap */
break;
case HAL_TCL_ENCAP_TYPE_802_3:
default:
/* TODO: Take care of other encap modes as well */
ret = -EINVAL;
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
goto fail_remove_tx_buf;
}
ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, ti.paddr)) {
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
ath12k_warn(ab, "failed to DMA map data Tx buffer\n");
ret = -ENOMEM;
goto fail_remove_tx_buf;
}
tx_desc->skb = skb;
tx_desc->mac_id = ar->pdev_idx;
ti.desc_id = tx_desc->desc_id;
ti.data_len = skb->len;
skb_cb->paddr = ti.paddr;
skb_cb->vif = arvif->vif;
skb_cb->ar = ar;
if (msdu_ext_desc) {
skb_ext_desc = dev_alloc_skb(sizeof(struct hal_tx_msdu_ext_desc));
if (!skb_ext_desc) {
ret = -ENOMEM;
goto fail_unmap_dma;
}
skb_put(skb_ext_desc, sizeof(struct hal_tx_msdu_ext_desc));
memset(skb_ext_desc->data, 0, skb_ext_desc->len);
msg = (struct hal_tx_msdu_ext_desc *)skb_ext_desc->data;
ath12k_hal_tx_cmd_ext_desc_setup(ab, msg, &ti);
ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
skb_ext_desc->len, DMA_TO_DEVICE);
ret = dma_mapping_error(ab->dev, ti.paddr);
if (ret) {
kfree_skb(skb_ext_desc);
goto fail_unmap_dma;
}
ti.data_len = skb_ext_desc->len;
ti.type = HAL_TCL_DESC_TYPE_EXT_DESC;
skb_cb->paddr_ext_desc = ti.paddr;
}
hal_ring_id = tx_ring->tcl_data_ring.ring_id;
tcl_ring = &ab->hal.srng_list[hal_ring_id];
spin_lock_bh(&tcl_ring->lock);
ath12k_hal_srng_access_begin(ab, tcl_ring);
hal_tcl_desc = ath12k_hal_srng_src_get_next_entry(ab, tcl_ring);
if (!hal_tcl_desc) {
/* NOTE: It is highly unlikely we'll run out of tcl_ring descriptors,
 * because each descriptor is enqueued directly onto the hw queue.
 */
ath12k_hal_srng_access_end(ab, tcl_ring);
ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
spin_unlock_bh(&tcl_ring->lock);
ret = -ENOMEM;
/* On a failure due to a full TCL ring, checking another ring for
 * free descriptors now is better than checking every ring up front
 * for each packet tx. Restart ring selection if some rings have not
 * been checked yet.
 */
if (ring_map != (BIT(ab->hw_params->max_tx_ring) - 1) &&
ab->hw_params->tcl_ring_retry) {
tcl_ring_retry = true;
ring_selector++;
}
goto fail_unmap_dma;
}
ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti);
ath12k_hal_srng_access_end(ab, tcl_ring);
spin_unlock_bh(&tcl_ring->lock);
ath12k_dbg_dump(ab, ATH12K_DBG_DP_TX, NULL, "dp tx msdu: ",
skb->data, skb->len);
atomic_inc(&ar->dp.num_tx_pending);
return 0;
fail_unmap_dma:
dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
fail_remove_tx_buf:
ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
if (tcl_ring_retry)
goto tcl_ring_sel;
return ret;
}
static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
struct sk_buff *msdu, u8 mac_id,
struct dp_tx_ring *tx_ring)
{
struct ath12k *ar;
struct ath12k_skb_cb *skb_cb;
u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
skb_cb = ATH12K_SKB_CB(msdu);
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
if (skb_cb->paddr_ext_desc)
dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
dev_kfree_skb_any(msdu);
ar = ab->pdevs[pdev_id].ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
}
static void
ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
struct sk_buff *msdu,
struct dp_tx_ring *tx_ring,
struct ath12k_dp_htt_wbm_tx_status *ts)
{
struct ieee80211_tx_info *info;
struct ath12k_skb_cb *skb_cb;
struct ath12k *ar;
skb_cb = ATH12K_SKB_CB(msdu);
info = IEEE80211_SKB_CB(msdu);
ar = skb_cb->ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
if (skb_cb->paddr_ext_desc)
dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
memset(&info->status, 0, sizeof(info->status));
if (ts->acked) {
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
ts->ack_rssi;
info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
} else {
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
}
}
ieee80211_tx_status(ar->hw, msdu);
}
static void
ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
void *desc, u8 mac_id,
struct sk_buff *msdu,
struct dp_tx_ring *tx_ring)
{
struct htt_tx_wbm_completion *status_desc;
struct ath12k_dp_htt_wbm_tx_status ts = {0};
enum hal_wbm_htt_tx_comp_status wbm_status;
status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
wbm_status = le32_get_bits(status_desc->info0,
HTT_TX_WBM_COMP_INFO0_STATUS);
switch (wbm_status) {
case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
ts.ack_rssi = le32_get_bits(status_desc->info2,
HTT_TX_WBM_COMP_INFO2_ACK_RSSI);
ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts);
break;
case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring);
break;
case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
/* This event is to be handled only when the driver decides to
* use WDS offload functionality.
*/
break;
default:
ath12k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
break;
}
}
static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
struct sk_buff *msdu,
struct hal_tx_status *ts)
{
struct ath12k_base *ab = ar->ab;
struct ieee80211_tx_info *info;
struct ath12k_skb_cb *skb_cb;
if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
/* Must not happen */
return;
}
skb_cb = ATH12K_SKB_CB(msdu);
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
if (skb_cb->paddr_ext_desc)
dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
rcu_read_lock();
if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
dev_kfree_skb_any(msdu);
goto exit;
}
if (!skb_cb->vif) {
dev_kfree_skb_any(msdu);
goto exit;
}
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
/* skip tx rate update from ieee80211_status */
info->status.rates[0].idx = -1;
if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
ts->ack_rssi;
info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
}
if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
/* NOTE: Tx rate status reporting. Tx completion status does not have
* necessary information (for example nss) to build the tx rate.
* Might end up reporting it out-of-band from HTT stats.
*/
ieee80211_tx_status(ar->hw, msdu);
exit:
rcu_read_unlock();
}
static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
struct hal_wbm_completion_ring_tx *desc,
struct hal_tx_status *ts)
{
ts->buf_rel_source =
le32_get_bits(desc->info0, HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE);
if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
return;
if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
return;
ts->status = le32_get_bits(desc->info0,
HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON);
ts->ppdu_id = le32_get_bits(desc->info1,
HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER);
if (le32_to_cpu(desc->rate_stats.info0) & HAL_TX_RATE_STATS_INFO0_VALID)
ts->rate_stats = le32_to_cpu(desc->rate_stats.info0);
else
ts->rate_stats = 0;
}
void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
{
struct ath12k *ar;
struct ath12k_dp *dp = &ab->dp;
int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
struct ath12k_tx_desc_info *tx_desc = NULL;
struct sk_buff *msdu;
struct hal_tx_status ts = { 0 };
struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
struct hal_wbm_release_ring *desc;
u8 mac_id, pdev_id;
u64 desc_va;
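/* Two phases: first drain completed descriptors from the status ring
 * into the per-ring tx_status FIFO under the srng lock, then process
 * them after dropping the lock so the hardware ring is released
 * quickly.
 */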
spin_lock_bh(&status_ring->lock);
ath12k_hal_srng_access_begin(ab, status_ring);
while (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head) != tx_ring->tx_status_tail) {
desc = ath12k_hal_srng_dst_get_next_entry(ab, status_ring);
if (!desc)
break;
memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
desc, sizeof(*desc));
tx_ring->tx_status_head =
ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head);
}
if (ath12k_hal_srng_dst_peek(ab, status_ring) &&
(ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) {
/* TODO: Process pending tx_status messages when kfifo_is_full() */
ath12k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
}
ath12k_hal_srng_access_end(ab, status_ring);
spin_unlock_bh(&status_ring->lock);
while (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
struct hal_wbm_completion_ring_tx *tx_status;
u32 desc_id;
tx_ring->tx_status_tail =
ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
ath12k_dp_tx_status_parse(ab, tx_status, &ts);
if (le32_get_bits(tx_status->info0, HAL_WBM_COMPL_TX_INFO0_CC_DONE)) {
/* HW has done the cookie conversion */
desc_va = ((u64)le32_to_cpu(tx_status->buf_va_hi) << 32 |
le32_to_cpu(tx_status->buf_va_lo));
tx_desc = (struct ath12k_tx_desc_info *)((unsigned long)desc_va);
} else {
/* SW does cookie conversion to VA */
desc_id = le32_get_bits(tx_status->buf_va_hi,
BUFFER_ADDR_INFO1_SW_COOKIE);
tx_desc = ath12k_dp_get_tx_desc(ab, desc_id);
}
if (!tx_desc) {
ath12k_warn(ab, "unable to retrieve tx_desc!");
continue;
}
msdu = tx_desc->skb;
mac_id = tx_desc->mac_id;
/* Release descriptor as soon as extracting necessary info
* to reduce contention
*/
ath12k_dp_tx_release_txbuf(dp, tx_desc, tx_desc->pool_id);
if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
ath12k_dp_tx_process_htt_tx_complete(ab,
(void *)tx_status,
mac_id, msdu,
tx_ring);
continue;
}
pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
ar = ab->pdevs[pdev_id].ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
ath12k_dp_tx_complete_msdu(ar, msdu, &ts);
}
}
static int
ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
int mac_id, u32 ring_id,
enum hal_ring_type ring_type,
enum htt_srng_ring_type *htt_ring_type,
enum htt_srng_ring_id *htt_ring_id)
{
int ret = 0;
switch (ring_type) {
case HAL_RXDMA_BUF:
/* For some targets, the host hands rx buffers to the fw and the fw
 * fills the rxbuf ring for each rxdma.
 */
if (!ab->hw_params->rx_mac_buf_ring) {
if (!(ring_id == HAL_SRNG_SW2RXDMA_BUF0 ||
ring_id == HAL_SRNG_SW2RXDMA_BUF1)) {
ret = -EINVAL;
}
*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
} else {
if (ring_id == HAL_SRNG_SW2RXDMA_BUF0) {
*htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
*htt_ring_type = HTT_SW_TO_SW_RING;
} else {
*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
}
}
break;
case HAL_RXDMA_DST:
*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
*htt_ring_type = HTT_HW_TO_SW_RING;
break;
case HAL_RXDMA_MONITOR_BUF:
*htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
case HAL_RXDMA_MONITOR_STATUS:
*htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
case HAL_RXDMA_MONITOR_DST:
*htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
*htt_ring_type = HTT_HW_TO_SW_RING;
break;
case HAL_RXDMA_MONITOR_DESC:
*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
case HAL_TX_MONITOR_BUF:
*htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
case HAL_TX_MONITOR_DST:
*htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING;
*htt_ring_type = HTT_HW_TO_SW_RING;
break;
default:
ath12k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
ret = -EINVAL;
}
return ret;
}
int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
int mac_id, enum hal_ring_type ring_type)
{
struct htt_srng_setup_cmd *cmd;
struct hal_srng *srng = &ab->hal.srng_list[ring_id];
struct hal_srng_params params;
struct sk_buff *skb;
u32 ring_entry_sz;
int len = sizeof(*cmd);
dma_addr_t hp_addr, tp_addr;
enum htt_srng_ring_type htt_ring_type;
enum htt_srng_ring_id htt_ring_id;
int ret;
skb = ath12k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
memset(&params, 0, sizeof(params));
ath12k_hal_srng_get_params(ab, srng, &params);
hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng);
tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng);
ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
ring_type, &htt_ring_type,
&htt_ring_id);
if (ret)
goto err_free;
skb_put(skb, len);
cmd = (struct htt_srng_setup_cmd *)skb->data;
cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_SRING_SETUP,
HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE);
if (htt_ring_type == HTT_SW_TO_HW_RING ||
htt_ring_type == HTT_HW_TO_SW_RING)
cmd->info0 |= le32_encode_bits(DP_SW2HW_MACID(mac_id),
HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
else
cmd->info0 |= le32_encode_bits(mac_id,
HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
cmd->info0 |= le32_encode_bits(htt_ring_type,
HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE);
cmd->info0 |= le32_encode_bits(htt_ring_id,
HTT_SRNG_SETUP_CMD_INFO0_RING_ID);
cmd->ring_base_addr_lo = cpu_to_le32(params.ring_base_paddr &
HAL_ADDR_LSB_REG_MASK);
cmd->ring_base_addr_hi = cpu_to_le32((u64)params.ring_base_paddr >>
HAL_ADDR_MSB_REG_SHIFT);
ret = ath12k_hal_srng_get_entrysize(ab, ring_type);
if (ret < 0)
goto err_free;
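/* HTT expects ring and entry sizes in units of 32-bit words, hence
 * the conversion from the byte-based entry size below.
 */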
ring_entry_sz = ret;
ring_entry_sz >>= 2;
cmd->info1 = le32_encode_bits(ring_entry_sz,
HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE);
cmd->info1 |= le32_encode_bits(params.num_entries * ring_entry_sz,
HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE);
cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP);
cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP);
cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP),
HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP);
if (htt_ring_type == HTT_SW_TO_HW_RING)
cmd->info1 |= cpu_to_le32(HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS);
cmd->ring_head_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(hp_addr));
cmd->ring_head_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(hp_addr));
cmd->ring_tail_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(tp_addr));
cmd->ring_tail_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(tp_addr));
cmd->ring_msi_addr_lo = cpu_to_le32(lower_32_bits(params.msi_addr));
cmd->ring_msi_addr_hi = cpu_to_le32(upper_32_bits(params.msi_addr));
cmd->msi_data = cpu_to_le32(params.msi_data);
cmd->intr_info =
le32_encode_bits(params.intr_batch_cntr_thres_entries * ring_entry_sz,
HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH);
cmd->intr_info |=
le32_encode_bits(params.intr_timer_thres_us >> 3,
HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH);
cmd->info2 = 0;
if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
cmd->info2 = le32_encode_bits(params.low_threshold,
HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH);
}
ath12k_dbg(ab, ATH12K_DBG_HAL,
"%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n",
__func__, cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
cmd->msi_data);
ath12k_dbg(ab, ATH12K_DBG_HAL,
"ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n",
ring_id, ring_type, cmd->intr_info, cmd->info2);
ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
if (ret)
goto err_free;
return 0;
err_free:
dev_kfree_skb_any(skb);
return ret;
}
#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
struct sk_buff *skb;
struct htt_ver_req_cmd *cmd;
int len = sizeof(*cmd);
int ret;
init_completion(&dp->htt_tgt_version_received);
skb = ath12k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_ver_req_cmd *)skb->data;
cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ,
HTT_VER_REQ_INFO_MSG_ID);
ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
HTT_TARGET_VERSION_TIMEOUT_HZ);
if (ret == 0) {
ath12k_warn(ab, "htt target version request timed out\n");
return -ETIMEDOUT;
}
if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
ath12k_err(ab, "unsupported htt major version %d supported version is %d\n",
dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
return -ENOTSUPP;
}
return 0;
}
int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = &ab->dp;
struct sk_buff *skb;
struct htt_ppdu_stats_cfg_cmd *cmd;
int len = sizeof(*cmd);
u8 pdev_mask;
int ret;
int i;
for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
skb = ath12k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
cmd->msg = le32_encode_bits(HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
HTT_PPDU_STATS_CFG_MSG_TYPE);
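/* Firmware pdev ids appear to be 1-based here, so pdev i is
 * selected with bit (i + 1) of the mask.
 */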
pdev_mask = 1 << (i + 1);
cmd->msg |= le32_encode_bits(pdev_mask, HTT_PPDU_STATS_CFG_PDEV_ID);
cmd->msg |= le32_encode_bits(mask, HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK);
ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
}
return 0;
}
int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
int mac_id, enum hal_ring_type ring_type,
int rx_buf_size,
struct htt_rx_ring_tlv_filter *tlv_filter)
{
struct htt_rx_ring_selection_cfg_cmd *cmd;
struct hal_srng *srng = &ab->hal.srng_list[ring_id];
struct hal_srng_params params;
struct sk_buff *skb;
int len = sizeof(*cmd);
enum htt_srng_ring_type htt_ring_type;
enum htt_srng_ring_id htt_ring_id;
int ret;
skb = ath12k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
memset(&params, 0, sizeof(params));
ath12k_hal_srng_get_params(ab, srng, &params);
ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
ring_type, &htt_ring_type,
&htt_ring_id);
if (ret)
goto err_free;
skb_put(skb, len);
cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
if (htt_ring_type == HTT_SW_TO_HW_RING ||
htt_ring_type == HTT_HW_TO_SW_RING)
cmd->info0 |=
le32_encode_bits(DP_SW2HW_MACID(mac_id),
HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
else
cmd->info0 |=
le32_encode_bits(mac_id,
HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
cmd->info0 |= le32_encode_bits(htt_ring_id,
HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS);
cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS);
cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid,
HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID);
cmd->info1 = le32_encode_bits(rx_buf_size,
HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE);
cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0);
cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1);
cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2);
cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3);
cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter);
if (tlv_filter->offset_valid) {
cmd->rx_packet_offset =
le32_encode_bits(tlv_filter->rx_packet_offset,
HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET);
cmd->rx_packet_offset |=
le32_encode_bits(tlv_filter->rx_header_offset,
HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET);
cmd->rx_mpdu_offset =
le32_encode_bits(tlv_filter->rx_mpdu_end_offset,
HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET);
cmd->rx_mpdu_offset |=
le32_encode_bits(tlv_filter->rx_mpdu_start_offset,
HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET);
cmd->rx_msdu_offset =
le32_encode_bits(tlv_filter->rx_msdu_end_offset,
HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET);
cmd->rx_msdu_offset |=
le32_encode_bits(tlv_filter->rx_msdu_start_offset,
HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET);
cmd->rx_attn_offset =
le32_encode_bits(tlv_filter->rx_attn_offset,
HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET);
}
ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
if (ret)
goto err_free;
return 0;
err_free:
dev_kfree_skb_any(skb);
return ret;
}
int
ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
struct htt_ext_stats_cfg_params *cfg_params,
u64 cookie)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = &ab->dp;
struct sk_buff *skb;
struct htt_ext_stats_cfg_cmd *cmd;
int len = sizeof(*cmd);
int ret;
skb = ath12k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
memset(cmd, 0, sizeof(*cmd));
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id;
cmd->hdr.stats_type = type;
cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0);
cmd->cfg_param1 = cpu_to_le32(cfg_params->cfg1);
cmd->cfg_param2 = cpu_to_le32(cfg_params->cfg2);
cmd->cfg_param3 = cpu_to_le32(cfg_params->cfg3);
cmd->cookie_lsb = cpu_to_le32(lower_32_bits(cookie));
cmd->cookie_msb = cpu_to_le32(upper_32_bits(cookie));
ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
if (ret) {
ath12k_warn(ab, "failed to send htt type stats request: %d",
ret);
dev_kfree_skb_any(skb);
return ret;
}
return 0;
}
int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
struct ath12k_base *ab = ar->ab;
int ret;
ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset);
if (ret) {
ath12k_err(ab, "failed to setup tx monitor filter %d\n", ret);
return ret;
}
ret = ath12k_dp_tx_htt_rx_monitor_mode_ring_config(ar, reset);
if (ret) {
ath12k_err(ab, "failed to setup rx monitor filter %d\n", ret);
return ret;
}
return 0;
}
int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = &ab->dp;
struct htt_rx_ring_tlv_filter tlv_filter = {0};
int ret, ring_id;
ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
tlv_filter.offset_valid = false;
if (!reset) {
tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
tlv_filter.pkt_filter_flags0 =
HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
tlv_filter.pkt_filter_flags1 =
HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
tlv_filter.pkt_filter_flags2 =
HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
tlv_filter.pkt_filter_flags3 =
HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
HTT_RX_MON_MO_DATA_FILTER_FLASG3;
}
if (ab->hw_params->rxdma1_enable) {
ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, 0,
HAL_RXDMA_MONITOR_BUF,
DP_RXDMA_REFILL_RING_SIZE,
&tlv_filter);
if (ret) {
ath12k_err(ab,
"failed to setup filter for monitor buf %d\n", ret);
return ret;
}
}
return 0;
}
int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
int mac_id, enum hal_ring_type ring_type,
int tx_buf_size,
struct htt_tx_ring_tlv_filter *htt_tlv_filter)
{
struct htt_tx_ring_selection_cfg_cmd *cmd;
struct hal_srng *srng = &ab->hal.srng_list[ring_id];
struct hal_srng_params params;
struct sk_buff *skb;
int len = sizeof(*cmd);
enum htt_srng_ring_type htt_ring_type;
enum htt_srng_ring_id htt_ring_id;
int ret;
skb = ath12k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
memset(&params, 0, sizeof(params));
ath12k_hal_srng_get_params(ab, srng, &params);
ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
ring_type, &htt_ring_type,
&htt_ring_id);
if (ret)
goto err_free;
skb_put(skb, len);
cmd = (struct htt_tx_ring_selection_cfg_cmd *)skb->data;
cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_TX_MONITOR_CFG,
HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
if (htt_ring_type == HTT_SW_TO_HW_RING ||
htt_ring_type == HTT_HW_TO_SW_RING)
cmd->info0 |=
le32_encode_bits(DP_SW2HW_MACID(mac_id),
HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
else
cmd->info0 |=
le32_encode_bits(mac_id,
HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
cmd->info0 |= le32_encode_bits(htt_ring_id,
HTT_TX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
HTT_TX_RING_SELECTION_CFG_CMD_INFO0_SS);
cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PS);
cmd->info1 |=
le32_encode_bits(tx_buf_size,
HTT_TX_RING_SELECTION_CFG_CMD_INFO1_RING_BUFF_SIZE);
if (htt_tlv_filter->tx_mon_mgmt_filter) {
cmd->info1 |=
le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
cmd->info1 |=
le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
cmd->info2 |=
le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
}
if (htt_tlv_filter->tx_mon_ctrl_filter) {
cmd->info1 |=
le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
cmd->info1 |=
le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
cmd->info2 |=
le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
}
if (htt_tlv_filter->tx_mon_data_filter) {
cmd->info1 |=
le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
cmd->info1 |=
le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
cmd->info2 |=
le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
}
cmd->tlv_filter_mask_in0 =
cpu_to_le32(htt_tlv_filter->tx_mon_downstream_tlv_flags);
cmd->tlv_filter_mask_in1 =
cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags0);
cmd->tlv_filter_mask_in2 =
cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags1);
cmd->tlv_filter_mask_in3 =
cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags2);
ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
if (ret)
goto err_free;
return 0;
err_free:
dev_kfree_skb_any(skb);
return ret;
}
int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = &ab->dp;
struct htt_tx_ring_tlv_filter tlv_filter = {0};
int ret, ring_id;
ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
/* TODO: Need to set upstream/downstream tlv filters
* here
*/
if (ab->hw_params->rxdma1_enable) {
ret = ath12k_dp_tx_htt_tx_filter_setup(ar->ab, ring_id, 0,
HAL_TX_MONITOR_BUF,
DP_RXDMA_REFILL_RING_SIZE,
&tlv_filter);
if (ret) {
ath12k_err(ab,
"failed to setup filter for monitor buf %d\n", ret);
return ret;
}
}
return 0;
}
|
linux-master
|
drivers/net/wireless/ath/ath12k/dp_tx.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "debug.h"
#include "hal.h"
#include "hal_tx.h"
#include "hal_rx.h"
#include "hal_desc.h"
#include "hif.h"
static void ath12k_hal_reo_set_desc_hdr(struct hal_desc_header *hdr,
u8 owner, u8 buffer_type, u32 magic)
{
hdr->info0 = le32_encode_bits(owner, HAL_DESC_HDR_INFO0_OWNER) |
le32_encode_bits(buffer_type, HAL_DESC_HDR_INFO0_BUF_TYPE);
/* Magic pattern in reserved bits for debugging */
hdr->info0 |= le32_encode_bits(magic, HAL_DESC_HDR_INFO0_DBG_RESERVED);
}
static int ath12k_hal_reo_cmd_queue_stats(struct hal_tlv_64_hdr *tlv,
struct ath12k_hal_reo_cmd *cmd)
{
struct hal_reo_get_queue_stats *desc;
tlv->tl = u32_encode_bits(HAL_REO_GET_QUEUE_STATS, HAL_TLV_HDR_TAG) |
u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
desc = (struct hal_reo_get_queue_stats *)tlv->value;
memset_startat(desc, 0, queue_addr_lo);
desc->cmd.info0 &= ~cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
desc->cmd.info0 |= cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
desc->queue_addr_lo = cpu_to_le32(cmd->addr_lo);
desc->info0 = le32_encode_bits(cmd->addr_hi,
HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI);
if (cmd->flag & HAL_REO_CMD_FLG_STATS_CLEAR)
desc->info0 |= cpu_to_le32(HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS);
return le32_get_bits(desc->cmd.info0, HAL_REO_CMD_HDR_INFO0_CMD_NUMBER);
}
static int ath12k_hal_reo_cmd_flush_cache(struct ath12k_hal *hal,
struct hal_tlv_64_hdr *tlv,
struct ath12k_hal_reo_cmd *cmd)
{
struct hal_reo_flush_cache *desc;
u8 avail_slot = ffz(hal->avail_blk_resource);
if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
if (avail_slot >= HAL_MAX_AVAIL_BLK_RES)
return -ENOSPC;
hal->current_blk_index = avail_slot;
}
tlv->tl = u32_encode_bits(HAL_REO_FLUSH_CACHE, HAL_TLV_HDR_TAG) |
u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
desc = (struct hal_reo_flush_cache *)tlv->value;
memset_startat(desc, 0, cache_addr_lo);
desc->cmd.info0 &= ~cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
desc->cmd.info0 |= cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
desc->cache_addr_lo = cpu_to_le32(cmd->addr_lo);
desc->info0 = le32_encode_bits(cmd->addr_hi,
HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI);
if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS)
desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS);
if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE);
desc->info0 |=
le32_encode_bits(avail_slot,
HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX);
}
if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_NO_INVAL)
desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE);
if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL)
desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL);
return le32_get_bits(desc->cmd.info0, HAL_REO_CMD_HDR_INFO0_CMD_NUMBER);
}
static int ath12k_hal_reo_cmd_update_rx_queue(struct hal_tlv_64_hdr *tlv,
struct ath12k_hal_reo_cmd *cmd)
{
struct hal_reo_update_rx_queue *desc;
tlv->tl = u32_encode_bits(HAL_REO_UPDATE_RX_REO_QUEUE, HAL_TLV_HDR_TAG) |
u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
desc = (struct hal_reo_update_rx_queue *)tlv->value;
memset_startat(desc, 0, queue_addr_lo);
desc->cmd.info0 &= ~cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
desc->cmd.info0 |= cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
desc->queue_addr_lo = cpu_to_le32(cmd->addr_lo);
desc->info0 =
le32_encode_bits(cmd->addr_hi,
HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_RX_QUEUE_NUM),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_VLD),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_ALDC),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_DIS_DUP_DETECTION),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_SOFT_REORDER_EN),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_AC),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_BAR),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_RETRY),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_CHECK_2K_MODE),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_OOR_MODE),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_BA_WINDOW_SIZE),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_CHECK),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_EVEN_PN),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_UNEVEN_PN),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_SIZE),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_SVLD),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_SSN),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_SEQ_2K_ERR),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_VALID),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID) |
le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN),
HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN);
desc->info1 =
le32_encode_bits(cmd->rx_queue_num,
HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER) |
le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_VLD),
HAL_REO_UPD_RX_QUEUE_INFO1_VLD) |
le32_encode_bits(u32_get_bits(cmd->upd1, HAL_REO_CMD_UPD1_ALDC),
HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER) |
le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_DIS_DUP_DETECTION),
HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION) |
le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_SOFT_REORDER_EN),
HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN) |
le32_encode_bits(u32_get_bits(cmd->upd1, HAL_REO_CMD_UPD1_AC),
HAL_REO_UPD_RX_QUEUE_INFO1_AC) |
le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_BAR),
HAL_REO_UPD_RX_QUEUE_INFO1_BAR) |
le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_CHECK_2K_MODE),
HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE) |
le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_RETRY),
HAL_REO_UPD_RX_QUEUE_INFO1_RETRY) |
le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_OOR_MODE),
HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE) |
le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_CHECK),
HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK) |
le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_EVEN_PN),
HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN) |
le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_UNEVEN_PN),
HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN) |
le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE),
HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE) |
le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG),
HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG);
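/* Translate the PN size from bits to the HW enum encoding */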
if (cmd->pn_size == 24)
cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_24;
else if (cmd->pn_size == 48)
cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_48;
else if (cmd->pn_size == 128)
cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_128;
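/* The HW field holds the window size minus one; a window of 1 is first bumped to 2 */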
if (cmd->ba_window_size < 1)
cmd->ba_window_size = 1;
if (cmd->ba_window_size == 1)
cmd->ba_window_size++;
desc->info2 =
le32_encode_bits(cmd->ba_window_size - 1,
HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE) |
le32_encode_bits(cmd->pn_size, HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE) |
le32_encode_bits(!!(cmd->upd2 & HAL_REO_CMD_UPD2_SVLD),
HAL_REO_UPD_RX_QUEUE_INFO2_SVLD) |
le32_encode_bits(u32_get_bits(cmd->upd2, HAL_REO_CMD_UPD2_SSN),
HAL_REO_UPD_RX_QUEUE_INFO2_SSN) |
le32_encode_bits(!!(cmd->upd2 & HAL_REO_CMD_UPD2_SEQ_2K_ERR),
HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR) |
le32_encode_bits(!!(cmd->upd2 & HAL_REO_CMD_UPD2_PN_ERR),
HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR);
return le32_get_bits(desc->cmd.info0, HAL_REO_CMD_HDR_INFO0_CMD_NUMBER);
}
int ath12k_hal_reo_cmd_send(struct ath12k_base *ab, struct hal_srng *srng,
enum hal_reo_cmd_type type,
struct ath12k_hal_reo_cmd *cmd)
{
struct hal_tlv_64_hdr *reo_desc;
int ret;
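/* On success the helpers below return the non-negative REO command number from the descriptor header */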
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
reo_desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
if (!reo_desc) {
ret = -ENOBUFS;
goto out;
}
switch (type) {
case HAL_REO_CMD_GET_QUEUE_STATS:
ret = ath12k_hal_reo_cmd_queue_stats(reo_desc, cmd);
break;
case HAL_REO_CMD_FLUSH_CACHE:
ret = ath12k_hal_reo_cmd_flush_cache(&ab->hal, reo_desc, cmd);
break;
case HAL_REO_CMD_UPDATE_RX_QUEUE:
ret = ath12k_hal_reo_cmd_update_rx_queue(reo_desc, cmd);
break;
case HAL_REO_CMD_FLUSH_QUEUE:
case HAL_REO_CMD_UNBLOCK_CACHE:
case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
ath12k_warn(ab, "Unsupported reo command %d\n", type);
ret = -ENOTSUPP;
break;
default:
ath12k_warn(ab, "Unknown reo command %d\n", type);
ret = -EINVAL;
break;
}
out:
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return ret;
}
void ath12k_hal_rx_buf_addr_info_set(struct ath12k_buffer_addr *binfo,
dma_addr_t paddr, u32 cookie, u8 manager)
{
u32 paddr_lo, paddr_hi;
paddr_lo = lower_32_bits(paddr);
paddr_hi = upper_32_bits(paddr);
binfo->info0 = le32_encode_bits(paddr_lo, BUFFER_ADDR_INFO0_ADDR);
binfo->info1 = le32_encode_bits(paddr_hi, BUFFER_ADDR_INFO1_ADDR) |
le32_encode_bits(cookie, BUFFER_ADDR_INFO1_SW_COOKIE) |
le32_encode_bits(manager, BUFFER_ADDR_INFO1_RET_BUF_MGR);
}
void ath12k_hal_rx_buf_addr_info_get(struct ath12k_buffer_addr *binfo,
dma_addr_t *paddr,
u32 *cookie, u8 *rbm)
{
*paddr = (((u64)le32_get_bits(binfo->info1, BUFFER_ADDR_INFO1_ADDR)) << 32) |
le32_get_bits(binfo->info0, BUFFER_ADDR_INFO0_ADDR);
*cookie = le32_get_bits(binfo->info1, BUFFER_ADDR_INFO1_SW_COOKIE);
*rbm = le32_get_bits(binfo->info1, BUFFER_ADDR_INFO1_RET_BUF_MGR);
}
void ath12k_hal_rx_msdu_link_info_get(struct hal_rx_msdu_link *link, u32 *num_msdus,
u32 *msdu_cookies,
enum hal_rx_buf_return_buf_manager *rbm)
{
struct hal_rx_msdu_details *msdu;
u32 val;
int i;
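/* Assume a full link descriptor, then trim at the first entry with a null buffer address */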
*num_msdus = HAL_NUM_RX_MSDUS_PER_LINK_DESC;
msdu = &link->msdu_link[0];
*rbm = le32_get_bits(msdu->buf_addr_info.info1,
BUFFER_ADDR_INFO1_RET_BUF_MGR);
for (i = 0; i < *num_msdus; i++) {
msdu = &link->msdu_link[i];
val = le32_get_bits(msdu->buf_addr_info.info0,
BUFFER_ADDR_INFO0_ADDR);
if (val == 0) {
*num_msdus = i;
break;
}
*msdu_cookies = le32_get_bits(msdu->buf_addr_info.info1,
BUFFER_ADDR_INFO1_SW_COOKIE);
msdu_cookies++;
}
}
int ath12k_hal_desc_reo_parse_err(struct ath12k_base *ab,
struct hal_reo_dest_ring *desc,
dma_addr_t *paddr, u32 *desc_bank)
{
enum hal_reo_dest_ring_push_reason push_reason;
enum hal_reo_dest_ring_error_code err_code;
u32 cookie, val;
push_reason = le32_get_bits(desc->info0,
HAL_REO_DEST_RING_INFO0_PUSH_REASON);
err_code = le32_get_bits(desc->info0,
HAL_REO_DEST_RING_INFO0_ERROR_CODE);
ab->soc_stats.reo_error[err_code]++;
if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED &&
push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
ath12k_warn(ab, "expected error push reason code, received %d\n",
push_reason);
return -EINVAL;
}
val = le32_get_bits(desc->info0, HAL_REO_DEST_RING_INFO0_BUFFER_TYPE);
if (val != HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC) {
ath12k_warn(ab, "expected buffer type link_desc");
return -EINVAL;
}
ath12k_hal_rx_reo_ent_paddr_get(ab, &desc->buf_addr_info, paddr, &cookie);
*desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);
return 0;
}
int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc,
struct hal_rx_wbm_rel_info *rel_info)
{
struct hal_wbm_release_ring *wbm_desc = desc;
struct hal_wbm_release_ring_cc_rx *wbm_cc_desc = desc;
enum hal_wbm_rel_desc_type type;
enum hal_wbm_rel_src_module rel_src;
bool hw_cc_done;
u64 desc_va;
u32 val;
type = le32_get_bits(wbm_desc->info0, HAL_WBM_RELEASE_INFO0_DESC_TYPE);
/* We expect only WBM_REL buffer type */
if (type != HAL_WBM_REL_DESC_TYPE_REL_MSDU) {
WARN_ON(1);
return -EINVAL;
}
rel_src = le32_get_bits(wbm_desc->info0,
HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE);
if (rel_src != HAL_WBM_REL_SRC_MODULE_RXDMA &&
rel_src != HAL_WBM_REL_SRC_MODULE_REO)
return -EINVAL;
/* The format of wbm rel ring desc changes based on the
* hw cookie conversion status
*/
hw_cc_done = le32_get_bits(wbm_desc->info0,
HAL_WBM_RELEASE_RX_INFO0_CC_STATUS);
if (!hw_cc_done) {
val = le32_get_bits(wbm_desc->buf_addr_info.info1,
BUFFER_ADDR_INFO1_RET_BUF_MGR);
if (val != HAL_RX_BUF_RBM_SW3_BM) {
ab->soc_stats.invalid_rbm++;
return -EINVAL;
}
rel_info->cookie = le32_get_bits(wbm_desc->buf_addr_info.info1,
BUFFER_ADDR_INFO1_SW_COOKIE);
rel_info->rx_desc = NULL;
} else {
val = le32_get_bits(wbm_cc_desc->info0,
HAL_WBM_RELEASE_RX_CC_INFO0_RBM);
if (val != HAL_RX_BUF_RBM_SW3_BM) {
ab->soc_stats.invalid_rbm++;
return -EINVAL;
}
rel_info->cookie = le32_get_bits(wbm_cc_desc->info1,
HAL_WBM_RELEASE_RX_CC_INFO1_COOKIE);
desc_va = ((u64)le32_to_cpu(wbm_cc_desc->buf_va_hi) << 32 |
le32_to_cpu(wbm_cc_desc->buf_va_lo));
rel_info->rx_desc =
(struct ath12k_rx_desc_info *)((unsigned long)desc_va);
}
rel_info->err_rel_src = rel_src;
rel_info->hw_cc_done = hw_cc_done;
rel_info->first_msdu = le32_get_bits(wbm_desc->info3,
HAL_WBM_RELEASE_INFO3_FIRST_MSDU);
rel_info->last_msdu = le32_get_bits(wbm_desc->info3,
HAL_WBM_RELEASE_INFO3_LAST_MSDU);
rel_info->continuation = le32_get_bits(wbm_desc->info3,
HAL_WBM_RELEASE_INFO3_CONTINUATION);
if (rel_info->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO) {
rel_info->push_reason =
le32_get_bits(wbm_desc->info0,
HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON);
rel_info->err_code =
le32_get_bits(wbm_desc->info0,
HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE);
} else {
rel_info->push_reason =
le32_get_bits(wbm_desc->info0,
HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON);
rel_info->err_code =
le32_get_bits(wbm_desc->info0,
HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE);
}
return 0;
}
void ath12k_hal_rx_reo_ent_paddr_get(struct ath12k_base *ab,
struct ath12k_buffer_addr *buff_addr,
dma_addr_t *paddr, u32 *cookie)
{
*paddr = ((u64)(le32_get_bits(buff_addr->info1,
BUFFER_ADDR_INFO1_ADDR)) << 32) |
le32_get_bits(buff_addr->info0, BUFFER_ADDR_INFO0_ADDR);
*cookie = le32_get_bits(buff_addr->info1, BUFFER_ADDR_INFO1_SW_COOKIE);
}
void ath12k_hal_rx_msdu_link_desc_set(struct ath12k_base *ab,
struct hal_wbm_release_ring *dst_desc,
struct hal_wbm_release_ring *src_desc,
enum hal_wbm_rel_bm_act action)
{
dst_desc->buf_addr_info = src_desc->buf_addr_info;
dst_desc->info0 |= le32_encode_bits(HAL_WBM_REL_SRC_MODULE_SW,
HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE) |
le32_encode_bits(action, HAL_WBM_RELEASE_INFO0_BM_ACTION) |
le32_encode_bits(HAL_WBM_REL_DESC_TYPE_MSDU_LINK,
HAL_WBM_RELEASE_INFO0_DESC_TYPE);
}
void ath12k_hal_reo_status_queue_stats(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv,
struct hal_reo_status *status)
{
struct hal_reo_get_queue_stats_status *desc =
(struct hal_reo_get_queue_stats_status *)tlv->value;
status->uniform_hdr.cmd_num =
le32_get_bits(desc->hdr.info0,
HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
status->uniform_hdr.cmd_status =
le32_get_bits(desc->hdr.info0,
HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
ath12k_dbg(ab, ATH12K_DBG_HAL, "Queue stats status:\n");
ath12k_dbg(ab, ATH12K_DBG_HAL, "header: cmd_num %d status %d\n",
status->uniform_hdr.cmd_num,
status->uniform_hdr.cmd_status);
ath12k_dbg(ab, ATH12K_DBG_HAL, "ssn %u cur_idx %u\n",
le32_get_bits(desc->info0,
HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN),
le32_get_bits(desc->info0,
HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX));
ath12k_dbg(ab, ATH12K_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n",
desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]);
ath12k_dbg(ab, ATH12K_DBG_HAL, "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
desc->last_rx_enqueue_timestamp,
desc->last_rx_dequeue_timestamp);
ath12k_dbg(ab, ATH12K_DBG_HAL, "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2],
desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5],
desc->rx_bitmap[6], desc->rx_bitmap[7]);
ath12k_dbg(ab, ATH12K_DBG_HAL, "count: cur_mpdu %u cur_msdu %u\n",
le32_get_bits(desc->info1,
HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT),
le32_get_bits(desc->info1,
HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT));
ath12k_dbg(ab, ATH12K_DBG_HAL, "fwd_timeout %u fwd_bar %u dup_count %u\n",
le32_get_bits(desc->info2,
HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT),
le32_get_bits(desc->info2,
HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT),
le32_get_bits(desc->info2,
HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT));
ath12k_dbg(ab, ATH12K_DBG_HAL, "frames_in_order %u bar_rcvd %u\n",
le32_get_bits(desc->info3,
HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT),
le32_get_bits(desc->info3,
HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT));
ath12k_dbg(ab, ATH12K_DBG_HAL, "num_mpdus %d num_msdus %d total_bytes %d\n",
desc->num_mpdu_frames, desc->num_msdu_frames,
desc->total_bytes);
ath12k_dbg(ab, ATH12K_DBG_HAL, "late_rcvd %u win_jump_2k %u hole_cnt %u\n",
le32_get_bits(desc->info4,
HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU),
le32_get_bits(desc->info2,
HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_WINDOW_JMP2K),
le32_get_bits(desc->info4,
HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT));
ath12k_dbg(ab, ATH12K_DBG_HAL, "looping count %u\n",
le32_get_bits(desc->info5,
HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT));
}
void ath12k_hal_reo_flush_queue_status(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv,
struct hal_reo_status *status)
{
struct hal_reo_flush_queue_status *desc =
(struct hal_reo_flush_queue_status *)tlv->value;
status->uniform_hdr.cmd_num =
le32_get_bits(desc->hdr.info0,
HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
status->uniform_hdr.cmd_status =
le32_get_bits(desc->hdr.info0,
HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
status->u.flush_queue.err_detected =
le32_get_bits(desc->info0,
HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED);
}
void ath12k_hal_reo_flush_cache_status(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv,
struct hal_reo_status *status)
{
struct ath12k_hal *hal = &ab->hal;
struct hal_reo_flush_cache_status *desc =
(struct hal_reo_flush_cache_status *)tlv->value;
status->uniform_hdr.cmd_num =
le32_get_bits(desc->hdr.info0,
HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
status->uniform_hdr.cmd_status =
le32_get_bits(desc->hdr.info0,
HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
status->u.flush_cache.err_detected =
le32_get_bits(desc->info0,
HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR);
status->u.flush_cache.err_code =
le32_get_bits(desc->info0,
HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE);
if (!status->u.flush_cache.err_code)
hal->avail_blk_resource |= BIT(hal->current_blk_index);
status->u.flush_cache.cache_controller_flush_status_hit =
le32_get_bits(desc->info0,
HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT);
status->u.flush_cache.cache_controller_flush_status_desc_type =
le32_get_bits(desc->info0,
HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE);
status->u.flush_cache.cache_controller_flush_status_client_id =
le32_get_bits(desc->info0,
HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID);
status->u.flush_cache.cache_controller_flush_status_err =
le32_get_bits(desc->info0,
HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR);
status->u.flush_cache.cache_controller_flush_status_cnt =
le32_get_bits(desc->info0,
HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT);
}
void ath12k_hal_reo_unblk_cache_status(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv,
struct hal_reo_status *status)
{
struct ath12k_hal *hal = &ab->hal;
struct hal_reo_unblock_cache_status *desc =
(struct hal_reo_unblock_cache_status *)tlv->value;
status->uniform_hdr.cmd_num =
le32_get_bits(desc->hdr.info0,
HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
status->uniform_hdr.cmd_status =
le32_get_bits(desc->hdr.info0,
HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
status->u.unblock_cache.err_detected =
le32_get_bits(desc->info0,
HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR);
status->u.unblock_cache.unblock_type =
le32_get_bits(desc->info0,
HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE);
if (!status->u.unblock_cache.err_detected &&
status->u.unblock_cache.unblock_type ==
HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE)
hal->avail_blk_resource &= ~BIT(hal->current_blk_index);
}
void ath12k_hal_reo_flush_timeout_list_status(struct ath12k_base *ab,
struct hal_tlv_64_hdr *tlv,
struct hal_reo_status *status)
{
struct hal_reo_flush_timeout_list_status *desc =
(struct hal_reo_flush_timeout_list_status *)tlv->value;
status->uniform_hdr.cmd_num =
le32_get_bits(desc->hdr.info0,
HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
status->uniform_hdr.cmd_status =
le32_get_bits(desc->hdr.info0,
HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
status->u.timeout_list.err_detected =
le32_get_bits(desc->info0,
HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR);
status->u.timeout_list.list_empty =
le32_get_bits(desc->info0,
HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY);
status->u.timeout_list.release_desc_cnt =
le32_get_bits(desc->info1,
HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT);
status->u.timeout_list.fwd_buf_cnt =
le32_get_bits(desc->info1,
HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT);
}
void ath12k_hal_reo_desc_thresh_reached_status(struct ath12k_base *ab,
struct hal_tlv_64_hdr *tlv,
struct hal_reo_status *status)
{
struct hal_reo_desc_thresh_reached_status *desc =
(struct hal_reo_desc_thresh_reached_status *)tlv->value;
status->uniform_hdr.cmd_num =
le32_get_bits(desc->hdr.info0,
HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
status->uniform_hdr.cmd_status =
le32_get_bits(desc->hdr.info0,
HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
status->u.desc_thresh_reached.threshold_idx =
le32_get_bits(desc->info0,
HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX);
status->u.desc_thresh_reached.link_desc_counter0 =
le32_get_bits(desc->info1,
HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0);
status->u.desc_thresh_reached.link_desc_counter1 =
le32_get_bits(desc->info2,
HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1);
status->u.desc_thresh_reached.link_desc_counter2 =
le32_get_bits(desc->info3,
HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2);
status->u.desc_thresh_reached.link_desc_counter_sum =
le32_get_bits(desc->info4,
HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM);
}
void ath12k_hal_reo_update_rx_reo_queue_status(struct ath12k_base *ab,
struct hal_tlv_64_hdr *tlv,
struct hal_reo_status *status)
{
struct hal_reo_status_hdr *desc =
(struct hal_reo_status_hdr *)tlv->value;
status->uniform_hdr.cmd_num =
le32_get_bits(desc->info0,
HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
status->uniform_hdr.cmd_status =
le32_get_bits(desc->info0,
HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
}
u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid)
{
u32 num_ext_desc;
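/* BA windows up to 105 fit in one queue extension descriptor, up to 210 in two; anything larger needs three */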
if (ba_window_size <= 1) {
if (tid != HAL_DESC_REO_NON_QOS_TID)
num_ext_desc = 1;
else
num_ext_desc = 0;
} else if (ba_window_size <= 105) {
num_ext_desc = 1;
} else if (ba_window_size <= 210) {
num_ext_desc = 2;
} else {
num_ext_desc = 3;
}
return sizeof(struct hal_rx_reo_queue) +
(num_ext_desc * sizeof(struct hal_rx_reo_queue_ext));
}
void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
int tid, u32 ba_window_size,
u32 start_seq, enum hal_pn_type type)
{
struct hal_rx_reo_queue_ext *ext_desc;
memset(qdesc, 0, sizeof(*qdesc));
ath12k_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED,
HAL_DESC_REO_QUEUE_DESC,
REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0);
qdesc->rx_queue_num = le32_encode_bits(tid, HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER);
qdesc->info0 =
le32_encode_bits(1, HAL_RX_REO_QUEUE_INFO0_VLD) |
le32_encode_bits(1, HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER) |
le32_encode_bits(ath12k_tid_to_ac(tid), HAL_RX_REO_QUEUE_INFO0_AC);
if (ba_window_size < 1)
ba_window_size = 1;
if (ba_window_size == 1 && tid != HAL_DESC_REO_NON_QOS_TID)
ba_window_size++;
if (ba_window_size == 1)
qdesc->info0 |= le32_encode_bits(1, HAL_RX_REO_QUEUE_INFO0_RETRY);
qdesc->info0 |= le32_encode_bits(ba_window_size - 1,
HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE);
switch (type) {
case HAL_PN_TYPE_NONE:
case HAL_PN_TYPE_WAPI_EVEN:
case HAL_PN_TYPE_WAPI_UNEVEN:
break;
case HAL_PN_TYPE_WPA:
qdesc->info0 |=
le32_encode_bits(1, HAL_RX_REO_QUEUE_INFO0_PN_CHECK) |
le32_encode_bits(HAL_RX_REO_QUEUE_PN_SIZE_48,
HAL_RX_REO_QUEUE_INFO0_PN_SIZE);
break;
}
/* TODO: Set Ignore ampdu flags based on BA window size and/or
* AMPDU capabilities
*/
qdesc->info0 |= le32_encode_bits(1, HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG);
qdesc->info1 |= le32_encode_bits(0, HAL_RX_REO_QUEUE_INFO1_SVLD);
if (start_seq <= 0xfff)
qdesc->info1 = le32_encode_bits(start_seq,
HAL_RX_REO_QUEUE_INFO1_SSN);
if (tid == HAL_DESC_REO_NON_QOS_TID)
return;
ext_desc = qdesc->ext_desc;
/* TODO: HW queue descriptors are currently allocated for max BA
* window size for all QOS TIDs so that same descriptor can be used
* later when ADDBA request is received. This should be changed to
* allocate HW queue descriptors based on BA window size being
* negotiated (0 for non BA cases), and reallocate when BA window
* size changes and also send WMI message to FW to change the REO
* queue descriptor in Rx peer entry as part of dp_rx_tid_update.
*/
memset(ext_desc, 0, 3 * sizeof(*ext_desc));
ath12k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
HAL_DESC_REO_QUEUE_EXT_DESC,
REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1);
ext_desc++;
ath12k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
HAL_DESC_REO_QUEUE_EXT_DESC,
REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2);
ext_desc++;
ath12k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
HAL_DESC_REO_QUEUE_EXT_DESC,
REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3);
}
void ath12k_hal_reo_init_cmd_ring(struct ath12k_base *ab,
struct hal_srng *srng)
{
struct hal_srng_params params;
struct hal_tlv_64_hdr *tlv;
struct hal_reo_get_queue_stats *desc;
int i, cmd_num = 1;
int entry_size;
u8 *entry;
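/* Pre-fill every REO command descriptor with a unique, monotonically increasing command number */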
memset(&params, 0, sizeof(params));
entry_size = ath12k_hal_srng_get_entrysize(ab, HAL_REO_CMD);
ath12k_hal_srng_get_params(ab, srng, &params);
entry = (u8 *)params.ring_base_vaddr;
for (i = 0; i < params.num_entries; i++) {
tlv = (struct hal_tlv_64_hdr *)entry;
desc = (struct hal_reo_get_queue_stats *)tlv->value;
desc->cmd.info0 = le32_encode_bits(cmd_num++,
HAL_REO_CMD_HDR_INFO0_CMD_NUMBER);
entry += entry_size;
}
}
void ath12k_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map)
{
u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
u32 val;
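/* Enable the aging list and aging flush logic, and steer fragment and BAR frames to the REO2SW0 ring */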
val = ath12k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
val |= u32_encode_bits(1, HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE) |
u32_encode_bits(1, HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE);
ath12k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
val = ath12k_hif_read32(ab, reo_base + HAL_REO1_MISC_CTRL_ADDR(ab));
val &= ~(HAL_REO1_MISC_CTL_FRAG_DST_RING |
HAL_REO1_MISC_CTL_BAR_DST_RING);
val |= u32_encode_bits(HAL_SRNG_RING_ID_REO2SW0,
HAL_REO1_MISC_CTL_FRAG_DST_RING);
val |= u32_encode_bits(HAL_SRNG_RING_ID_REO2SW0,
HAL_REO1_MISC_CTL_BAR_DST_RING);
ath12k_hif_write32(ab, reo_base + HAL_REO1_MISC_CTRL_ADDR(ab), val);
ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC);
ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC);
ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC);
ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
HAL_DEFAULT_VO_REO_TIMEOUT_USEC);
ath12k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
ring_hash_map);
ath12k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
ring_hash_map);
}
|
linux-master
|
drivers/net/wireless/ath/ath12k/hal_rx.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "debug.h"
static int ath12k_dbring_bufs_replenish(struct ath12k *ar,
struct ath12k_dbring *ring,
struct ath12k_dbring_element *buff,
gfp_t gfp)
{
struct ath12k_base *ab = ar->ab;
struct hal_srng *srng;
dma_addr_t paddr;
void *ptr_aligned, *ptr_unaligned, *desc;
int ret;
int buf_id;
u32 cookie;
srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
lockdep_assert_held(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
ptr_unaligned = buff->payload;
ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
DMA_FROM_DEVICE);
ret = dma_mapping_error(ab->dev, paddr);
if (ret)
goto err;
spin_lock_bh(&ring->idr_lock);
buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, gfp);
spin_unlock_bh(&ring->idr_lock);
if (buf_id < 0) {
ret = -ENOBUFS;
goto err_dma_unmap;
}
desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
if (!desc) {
ret = -ENOENT;
goto err_idr_remove;
}
buff->paddr = paddr;
cookie = u32_encode_bits(ar->pdev_idx, DP_RXDMA_BUF_COOKIE_PDEV_ID) |
u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);
ath12k_hal_srng_access_end(ab, srng);
return 0;
err_idr_remove:
spin_lock_bh(&ring->idr_lock);
idr_remove(&ring->bufs_idr, buf_id);
spin_unlock_bh(&ring->idr_lock);
err_dma_unmap:
dma_unmap_single(ab->dev, paddr, ring->buf_sz,
DMA_FROM_DEVICE);
err:
ath12k_hal_srng_access_end(ab, srng);
return ret;
}
static int ath12k_dbring_fill_bufs(struct ath12k *ar,
struct ath12k_dbring *ring,
gfp_t gfp)
{
struct ath12k_dbring_element *buff;
struct hal_srng *srng;
struct ath12k_base *ab = ar->ab;
int num_remain, req_entries, num_free;
u32 align;
int size, ret;
srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
spin_lock_bh(&srng->lock);
num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
req_entries = min(num_free, ring->bufs_max);
num_remain = req_entries;
align = ring->buf_align;
size = sizeof(*buff) + ring->buf_sz + align - 1;
while (num_remain > 0) {
buff = kzalloc(size, gfp);
if (!buff)
break;
ret = ath12k_dbring_bufs_replenish(ar, ring, buff, gfp);
if (ret) {
ath12k_warn(ab, "failed to replenish db ring num_remain %d req_ent %d\n",
num_remain, req_entries);
kfree(buff);
break;
}
num_remain--;
}
spin_unlock_bh(&srng->lock);
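/* Return the number of requested entries that could not be replenished */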
return num_remain;
}
int ath12k_dbring_wmi_cfg_setup(struct ath12k *ar,
struct ath12k_dbring *ring,
enum wmi_direct_buffer_module id)
{
struct ath12k_wmi_pdev_dma_ring_cfg_arg arg = {0};
int ret;
if (id >= WMI_DIRECT_BUF_MAX)
return -EINVAL;
arg.pdev_id = DP_SW2HW_MACID(ring->pdev_id);
arg.module_id = id;
arg.base_paddr_lo = lower_32_bits(ring->refill_srng.paddr);
arg.base_paddr_hi = upper_32_bits(ring->refill_srng.paddr);
arg.head_idx_paddr_lo = lower_32_bits(ring->hp_addr);
arg.head_idx_paddr_hi = upper_32_bits(ring->hp_addr);
arg.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr);
arg.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr);
arg.num_elems = ring->bufs_max;
arg.buf_size = ring->buf_sz;
arg.num_resp_per_event = ring->num_resp_per_event;
arg.event_timeout_ms = ring->event_timeout_ms;
ret = ath12k_wmi_pdev_dma_ring_cfg(ar, &arg);
if (ret) {
ath12k_warn(ar->ab, "failed to setup db ring cfg\n");
return ret;
}
return 0;
}
int ath12k_dbring_set_cfg(struct ath12k *ar, struct ath12k_dbring *ring,
u32 num_resp_per_event, u32 event_timeout_ms,
int (*handler)(struct ath12k *,
struct ath12k_dbring_data *))
{
if (WARN_ON(!ring))
return -EINVAL;
ring->num_resp_per_event = num_resp_per_event;
ring->event_timeout_ms = event_timeout_ms;
ring->handler = handler;
return 0;
}
int ath12k_dbring_buf_setup(struct ath12k *ar,
struct ath12k_dbring *ring,
struct ath12k_dbring_cap *db_cap)
{
struct ath12k_base *ab = ar->ab;
struct hal_srng *srng;
int ret;
srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
ring->bufs_max = ring->refill_srng.size /
ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);
ring->buf_sz = db_cap->min_buf_sz;
ring->buf_align = db_cap->min_buf_align;
ring->pdev_id = db_cap->pdev_id;
ring->hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng);
ring->tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng);
ret = ath12k_dbring_fill_bufs(ar, ring, GFP_KERNEL);
return ret;
}
int ath12k_dbring_srng_setup(struct ath12k *ar, struct ath12k_dbring *ring,
int ring_num, int num_entries)
{
int ret;
ret = ath12k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
ring_num, ar->pdev_idx, num_entries);
if (ret < 0) {
ath12k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n",
ret, ring_num);
goto err;
}
return 0;
err:
ath12k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
return ret;
}
int ath12k_dbring_get_cap(struct ath12k_base *ab,
u8 pdev_idx,
enum wmi_direct_buffer_module id,
struct ath12k_dbring_cap *db_cap)
{
int i;
if (!ab->num_db_cap || !ab->db_caps)
return -ENOENT;
if (id >= WMI_DIRECT_BUF_MAX)
return -EINVAL;
for (i = 0; i < ab->num_db_cap; i++) {
if (pdev_idx == ab->db_caps[i].pdev_id &&
id == ab->db_caps[i].id) {
*db_cap = ab->db_caps[i];
return 0;
}
}
return -ENOENT;
}
int ath12k_dbring_buffer_release_event(struct ath12k_base *ab,
struct ath12k_dbring_buf_release_event *ev)
{
struct ath12k_dbring *ring = NULL;
struct hal_srng *srng;
struct ath12k *ar;
struct ath12k_dbring_element *buff;
struct ath12k_dbring_data handler_data;
struct ath12k_buffer_addr desc;
u8 *vaddr_unalign;
u32 num_entry, num_buff_reaped;
u8 pdev_idx, rbm;
u32 cookie;
int buf_id;
int size;
dma_addr_t paddr;
int ret = 0;
pdev_idx = le32_to_cpu(ev->fixed.pdev_id);
if (pdev_idx >= ab->num_radios) {
ath12k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
return -EINVAL;
}
if (ev->fixed.num_buf_release_entry !=
ev->fixed.num_meta_data_entry) {
ath12k_warn(ab, "Buffer entry %d mismatch meta entry %d\n",
ev->fixed.num_buf_release_entry,
ev->fixed.num_meta_data_entry);
return -EINVAL;
}
ar = ab->pdevs[pdev_idx].ar;
rcu_read_lock();
if (!rcu_dereference(ab->pdevs_active[pdev_idx])) {
ret = -EINVAL;
goto rcu_unlock;
}
switch (ev->fixed.module_id) {
case WMI_DIRECT_BUF_SPECTRAL:
break;
default:
ring = NULL;
ath12k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
ev->fixed.module_id);
break;
}
if (!ring) {
ret = -EINVAL;
goto rcu_unlock;
}
srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
num_entry = le32_to_cpu(ev->fixed.num_buf_release_entry);
size = sizeof(*buff) + ring->buf_sz + ring->buf_align - 1;
num_buff_reaped = 0;
spin_lock_bh(&srng->lock);
while (num_buff_reaped < num_entry) {
desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo;
desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi;
handler_data.meta = ev->meta_data[num_buff_reaped];
num_buff_reaped++;
ath12k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);
buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
spin_lock_bh(&ring->idr_lock);
buff = idr_find(&ring->bufs_idr, buf_id);
if (!buff) {
spin_unlock_bh(&ring->idr_lock);
continue;
}
idr_remove(&ring->bufs_idr, buf_id);
spin_unlock_bh(&ring->idr_lock);
dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
DMA_FROM_DEVICE);
if (ring->handler) {
vaddr_unalign = buff->payload;
handler_data.data = PTR_ALIGN(vaddr_unalign,
ring->buf_align);
handler_data.data_sz = ring->buf_sz;
ring->handler(ar, &handler_data);
}
memset(buff, 0, size);
ath12k_dbring_bufs_replenish(ar, ring, buff, GFP_ATOMIC);
}
spin_unlock_bh(&srng->lock);
rcu_unlock:
rcu_read_unlock();
return ret;
}
void ath12k_dbring_srng_cleanup(struct ath12k *ar, struct ath12k_dbring *ring)
{
ath12k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
}
void ath12k_dbring_buf_cleanup(struct ath12k *ar, struct ath12k_dbring *ring)
{
struct ath12k_dbring_element *buff;
int buf_id;
spin_lock_bh(&ring->idr_lock);
idr_for_each_entry(&ring->bufs_idr, buff, buf_id) {
idr_remove(&ring->bufs_idr, buf_id);
dma_unmap_single(ar->ab->dev, buff->paddr,
ring->buf_sz, DMA_FROM_DEVICE);
kfree(buff);
}
idr_destroy(&ring->bufs_idr);
spin_unlock_bh(&ring->idr_lock);
}
|
linux-master
|
drivers/net/wireless/ath/ath12k/dbring.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "dp_rx.h"
#include "debug.h"
#include "hif.h"
const struct ce_attr ath12k_host_ce_config_qcn9274[] = {
/* CE0: host->target HTC control and raw streams */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 16,
.src_sz_max = 2048,
.dest_nentries = 0,
},
/* CE1: target->host HTT + HTC control */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
.recv_cb = ath12k_htc_rx_completion_handler,
},
/* CE2: target->host WMI */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 128,
.recv_cb = ath12k_htc_rx_completion_handler,
},
/* CE3: host->target WMI (mac0) */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
},
/* CE4: host->target HTT */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 2048,
.src_sz_max = 256,
.dest_nentries = 0,
},
/* CE5: target->host pktlog */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
.recv_cb = ath12k_dp_htt_htc_t2h_msg_handler,
},
/* CE6: target autonomous hif_memcpy */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
/* CE7: host->target WMI (mac1) */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
},
/* CE8: target autonomous hif_memcpy */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
/* CE9: MHI */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
/* CE10: MHI */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
/* CE11: MHI */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
/* CE12: CV Prefetch */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
/* CE13: CV Prefetch */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
/* CE14: target->host dbg log */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
.recv_cb = ath12k_htc_rx_completion_handler,
},
/* CE15: reserved for future use */
{
.flags = (CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
};
const struct ce_attr ath12k_host_ce_config_wcn7850[] = {
/* CE0: host->target HTC control and raw streams */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 16,
.src_sz_max = 2048,
.dest_nentries = 0,
},
/* CE1: target->host HTT + HTC control */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
.recv_cb = ath12k_htc_rx_completion_handler,
},
/* CE2: target->host WMI */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 64,
.recv_cb = ath12k_htc_rx_completion_handler,
},
/* CE3: host->target WMI (mac0) */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
},
/* CE4: host->target HTT */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 2048,
.src_sz_max = 256,
.dest_nentries = 0,
},
/* CE5: target->host pktlog */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
/* CE6: target autonomous hif_memcpy */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
/* CE7: host->target WMI (mac1) */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 0,
},
/* CE8: target autonomous hif_memcpy */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
};
static int ath12k_ce_rx_buf_enqueue_pipe(struct ath12k_ce_pipe *pipe,
struct sk_buff *skb, dma_addr_t paddr)
{
struct ath12k_base *ab = pipe->ab;
struct ath12k_ce_ring *ring = pipe->dest_ring;
struct hal_srng *srng;
unsigned int write_index;
unsigned int nentries_mask = ring->nentries_mask;
struct hal_ce_srng_dest_desc *desc;
int ret;
lockdep_assert_held(&ab->ce.ce_lock);
write_index = ring->write_index;
srng = &ab->hal.srng_list[ring->hal_ring_id];
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
if (unlikely(ath12k_hal_srng_src_num_free(ab, srng, false) < 1)) {
ret = -ENOSPC;
goto exit;
}
desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
if (!desc) {
ret = -ENOSPC;
goto exit;
}
ath12k_hal_ce_dst_set_desc(desc, paddr);
ring->skb[write_index] = skb;
write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
ring->write_index = write_index;
pipe->rx_buf_needed--;
ret = 0;
exit:
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return ret;
}
static int ath12k_ce_rx_post_pipe(struct ath12k_ce_pipe *pipe)
{
struct ath12k_base *ab = pipe->ab;
struct sk_buff *skb;
dma_addr_t paddr;
int ret = 0;
if (!(pipe->dest_ring || pipe->status_ring))
return 0;
spin_lock_bh(&ab->ce.ce_lock);
while (pipe->rx_buf_needed) {
skb = dev_alloc_skb(pipe->buf_sz);
if (!skb) {
ret = -ENOMEM;
goto exit;
}
WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
paddr = dma_map_single(ab->dev, skb->data,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(ab->dev, paddr))) {
ath12k_warn(ab, "failed to dma map ce rx buf\n");
dev_kfree_skb_any(skb);
ret = -EIO;
goto exit;
}
ATH12K_SKB_RXCB(skb)->paddr = paddr;
ret = ath12k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
if (ret) {
ath12k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
dma_unmap_single(ab->dev, paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
goto exit;
}
}
exit:
spin_unlock_bh(&ab->ce.ce_lock);
return ret;
}
static int ath12k_ce_completed_recv_next(struct ath12k_ce_pipe *pipe,
struct sk_buff **skb, int *nbytes)
{
struct ath12k_base *ab = pipe->ab;
struct hal_ce_srng_dst_status_desc *desc;
struct hal_srng *srng;
unsigned int sw_index;
unsigned int nentries_mask;
int ret = 0;
spin_lock_bh(&ab->ce.ce_lock);
sw_index = pipe->dest_ring->sw_index;
nentries_mask = pipe->dest_ring->nentries_mask;
srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
if (!desc) {
ret = -EIO;
goto err;
}
*nbytes = ath12k_hal_ce_dst_status_get_length(desc);
if (*nbytes == 0) {
ret = -EIO;
goto err;
}
*skb = pipe->dest_ring->skb[sw_index];
pipe->dest_ring->skb[sw_index] = NULL;
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
pipe->dest_ring->sw_index = sw_index;
pipe->rx_buf_needed++;
err:
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
spin_unlock_bh(&ab->ce.ce_lock);
return ret;
}
static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe)
{
struct ath12k_base *ab = pipe->ab;
struct sk_buff *skb;
struct sk_buff_head list;
unsigned int nbytes, max_nbytes;
int ret;
__skb_queue_head_init(&list);
while (ath12k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
max_nbytes = skb->len + skb_tailroom(skb);
dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
max_nbytes, DMA_FROM_DEVICE);
if (unlikely(max_nbytes < nbytes)) {
ath12k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
nbytes, max_nbytes);
dev_kfree_skb_any(skb);
continue;
}
skb_put(skb, nbytes);
__skb_queue_tail(&list, skb);
}
while ((skb = __skb_dequeue(&list))) {
ath12k_dbg(ab, ATH12K_DBG_AHB, "rx ce pipe %d len %d\n",
pipe->pipe_num, skb->len);
pipe->recv_cb(ab, skb);
}
ret = ath12k_ce_rx_post_pipe(pipe);
if (ret && ret != -ENOSPC) {
ath12k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
pipe->pipe_num, ret);
mod_timer(&ab->rx_replenish_retry,
jiffies + ATH12K_CE_RX_POST_RETRY_JIFFIES);
}
}
static struct sk_buff *ath12k_ce_completed_send_next(struct ath12k_ce_pipe *pipe)
{
struct ath12k_base *ab = pipe->ab;
struct hal_ce_srng_src_desc *desc;
struct hal_srng *srng;
unsigned int sw_index;
unsigned int nentries_mask;
struct sk_buff *skb;
spin_lock_bh(&ab->ce.ce_lock);
sw_index = pipe->src_ring->sw_index;
nentries_mask = pipe->src_ring->nentries_mask;
srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
desc = ath12k_hal_srng_src_reap_next(ab, srng);
if (!desc) {
skb = ERR_PTR(-EIO);
goto err_unlock;
}
skb = pipe->src_ring->skb[sw_index];
pipe->src_ring->skb[sw_index] = NULL;
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
pipe->src_ring->sw_index = sw_index;
err_unlock:
spin_unlock_bh(&srng->lock);
spin_unlock_bh(&ab->ce.ce_lock);
return skb;
}
static void ath12k_ce_send_done_cb(struct ath12k_ce_pipe *pipe)
{
struct ath12k_base *ab = pipe->ab;
struct sk_buff *skb;
while (!IS_ERR(skb = ath12k_ce_completed_send_next(pipe))) {
if (!skb)
continue;
dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr, skb->len,
DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
}
static void ath12k_ce_srng_msi_ring_params_setup(struct ath12k_base *ab, u32 ce_id,
struct hal_srng_params *ring_params)
{
u32 msi_data_start;
u32 msi_data_count, msi_data_idx;
u32 msi_irq_start;
u32 addr_lo;
u32 addr_hi;
int ret;
ret = ath12k_hif_get_user_msi_vector(ab, "CE",
&msi_data_count, &msi_data_start,
&msi_irq_start);
if (ret)
return;
ath12k_hif_get_msi_address(ab, &addr_lo, &addr_hi);
ath12k_hif_get_ce_msi_idx(ab, ce_id, &msi_data_idx);
ring_params->msi_addr = addr_lo;
ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}
static int ath12k_ce_init_ring(struct ath12k_base *ab,
struct ath12k_ce_ring *ce_ring,
int ce_id, enum hal_ring_type type)
{
struct hal_srng_params params = { 0 };
int ret;
params.ring_base_paddr = ce_ring->base_addr_ce_space;
params.ring_base_vaddr = ce_ring->base_addr_owner_space;
params.num_entries = ce_ring->nentries;
if (!(CE_ATTR_DIS_INTR & ab->hw_params->host_ce_config[ce_id].flags))
ath12k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);
switch (type) {
case HAL_CE_SRC:
if (!(CE_ATTR_DIS_INTR & ab->hw_params->host_ce_config[ce_id].flags))
params.intr_batch_cntr_thres_entries = 1;
break;
case HAL_CE_DST:
params.max_buffer_len = ab->hw_params->host_ce_config[ce_id].src_sz_max;
if (!(ab->hw_params->host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
params.intr_timer_thres_us = 1024;
params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
params.low_threshold = ce_ring->nentries - 3;
}
break;
case HAL_CE_DST_STATUS:
if (!(ab->hw_params->host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
params.intr_batch_cntr_thres_entries = 1;
params.intr_timer_thres_us = 0x1000;
}
break;
default:
ath12k_warn(ab, "Invalid CE ring type %d\n", type);
return -EINVAL;
}
/* TODO: Init other params needed by HAL to init the ring */
ret = ath12k_hal_srng_setup(ab, type, ce_id, 0, &params);
if (ret < 0) {
ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n",
ret, ce_id);
return ret;
}
ce_ring->hal_ring_id = ret;
return 0;
}
static struct ath12k_ce_ring *
ath12k_ce_alloc_ring(struct ath12k_base *ab, int nentries, int desc_sz)
{
struct ath12k_ce_ring *ce_ring;
dma_addr_t base_addr;
ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
if (!ce_ring)
return ERR_PTR(-ENOMEM);
ce_ring->nentries = nentries;
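/* Callers round nentries up to a power of two, so a simple mask handles index wrap-around */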
ce_ring->nentries_mask = nentries - 1;
/* Legacy platforms that do not support cache
* coherent DMA are unsupported
*/
ce_ring->base_addr_owner_space_unaligned =
dma_alloc_coherent(ab->dev,
nentries * desc_sz + CE_DESC_RING_ALIGN,
&base_addr, GFP_KERNEL);
if (!ce_ring->base_addr_owner_space_unaligned) {
kfree(ce_ring);
return ERR_PTR(-ENOMEM);
}
ce_ring->base_addr_ce_space_unaligned = base_addr;
ce_ring->base_addr_owner_space =
PTR_ALIGN(ce_ring->base_addr_owner_space_unaligned,
CE_DESC_RING_ALIGN);
ce_ring->base_addr_ce_space = ALIGN(ce_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
return ce_ring;
}
static int ath12k_ce_alloc_pipe(struct ath12k_base *ab, int ce_id)
{
struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
const struct ce_attr *attr = &ab->hw_params->host_ce_config[ce_id];
struct ath12k_ce_ring *ring;
int nentries;
int desc_sz;
pipe->attr_flags = attr->flags;
if (attr->src_nentries) {
pipe->send_cb = ath12k_ce_send_done_cb;
nentries = roundup_pow_of_two(attr->src_nentries);
desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
if (IS_ERR(ring))
return PTR_ERR(ring);
pipe->src_ring = ring;
}
if (attr->dest_nentries) {
pipe->recv_cb = attr->recv_cb;
nentries = roundup_pow_of_two(attr->dest_nentries);
desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
if (IS_ERR(ring))
return PTR_ERR(ring);
pipe->dest_ring = ring;
desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
if (IS_ERR(ring))
return PTR_ERR(ring);
pipe->status_ring = ring;
}
return 0;
}
void ath12k_ce_per_engine_service(struct ath12k_base *ab, u16 ce_id)
{
struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
if (pipe->send_cb)
pipe->send_cb(pipe);
if (pipe->recv_cb)
ath12k_ce_recv_process_cb(pipe);
}
void ath12k_ce_poll_send_completed(struct ath12k_base *ab, u8 pipe_id)
{
struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
pipe->send_cb(pipe);
}
int ath12k_ce_send(struct ath12k_base *ab, struct sk_buff *skb, u8 pipe_id,
u16 transfer_id)
{
struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
struct hal_ce_srng_src_desc *desc;
struct hal_srng *srng;
unsigned int write_index, sw_index;
unsigned int nentries_mask;
int ret = 0;
u8 byte_swap_data = 0;
int num_used;
/* Check if some entries could be regained by handling tx completion if
* the CE has interrupts disabled and the number of used entries exceeds
* the defined usage threshold.
*/
if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
spin_lock_bh(&ab->ce.ce_lock);
write_index = pipe->src_ring->write_index;
sw_index = pipe->src_ring->sw_index;
if (write_index >= sw_index)
num_used = write_index - sw_index;
else
num_used = pipe->src_ring->nentries - sw_index +
write_index;
spin_unlock_bh(&ab->ce.ce_lock);
if (num_used > ATH12K_CE_USAGE_THRESHOLD)
ath12k_ce_poll_send_completed(ab, pipe->pipe_num);
}
if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
return -ESHUTDOWN;
spin_lock_bh(&ab->ce.ce_lock);
write_index = pipe->src_ring->write_index;
nentries_mask = pipe->src_ring->nentries_mask;
srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
if (unlikely(ath12k_hal_srng_src_num_free(ab, srng, false) < 1)) {
ath12k_hal_srng_access_end(ab, srng);
ret = -ENOBUFS;
goto unlock;
}
desc = ath12k_hal_srng_src_get_next_reaped(ab, srng);
if (!desc) {
ath12k_hal_srng_access_end(ab, srng);
ret = -ENOBUFS;
goto unlock;
}
if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
byte_swap_data = 1;
ath12k_hal_ce_src_set_desc(desc, ATH12K_SKB_CB(skb)->paddr,
skb->len, transfer_id, byte_swap_data);
pipe->src_ring->skb[write_index] = skb;
pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
write_index);
ath12k_hal_srng_access_end(ab, srng);
unlock:
spin_unlock_bh(&srng->lock);
spin_unlock_bh(&ab->ce.ce_lock);
return ret;
}
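/* Unmap and free any rx buffers still attached to a pipe's destination ring. */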
static void ath12k_ce_rx_pipe_cleanup(struct ath12k_ce_pipe *pipe)
{
struct ath12k_base *ab = pipe->ab;
struct ath12k_ce_ring *ring = pipe->dest_ring;
struct sk_buff *skb;
int i;
if (!(ring && pipe->buf_sz))
return;
for (i = 0; i < ring->nentries; i++) {
skb = ring->skb[i];
if (!skb)
continue;
ring->skb[i] = NULL;
dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
}
void ath12k_ce_cleanup_pipes(struct ath12k_base *ab)
{
struct ath12k_ce_pipe *pipe;
int pipe_num;
for (pipe_num = 0; pipe_num < ab->hw_params->ce_count; pipe_num++) {
pipe = &ab->ce.ce_pipe[pipe_num];
ath12k_ce_rx_pipe_cleanup(pipe);
/* Cleanup any src CE's which have interrupts disabled */
ath12k_ce_poll_send_completed(ab, pipe_num);
/* NOTE: Should we also clean up tx buffers in all pipes? */
}
}
void ath12k_ce_rx_post_buf(struct ath12k_base *ab)
{
struct ath12k_ce_pipe *pipe;
int i;
int ret;
for (i = 0; i < ab->hw_params->ce_count; i++) {
pipe = &ab->ce.ce_pipe[i];
ret = ath12k_ce_rx_post_pipe(pipe);
if (ret) {
if (ret == -ENOSPC)
continue;
ath12k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
i, ret);
mod_timer(&ab->rx_replenish_retry,
jiffies + ATH12K_CE_RX_POST_RETRY_JIFFIES);
return;
}
}
}
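/* Timer callback that retries posting rx buffers after an earlier failure. */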
void ath12k_ce_rx_replenish_retry(struct timer_list *t)
{
struct ath12k_base *ab = from_timer(ab, t, rx_replenish_retry);
ath12k_ce_rx_post_buf(ab);
}
static void ath12k_ce_shadow_config(struct ath12k_base *ab)
{
int i;
for (i = 0; i < ab->hw_params->ce_count; i++) {
if (ab->hw_params->host_ce_config[i].src_nentries)
ath12k_hal_srng_update_shadow_config(ab, HAL_CE_SRC, i);
if (ab->hw_params->host_ce_config[i].dest_nentries) {
ath12k_hal_srng_update_shadow_config(ab, HAL_CE_DST, i);
ath12k_hal_srng_update_shadow_config(ab, HAL_CE_DST_STATUS, i);
}
}
}
void ath12k_ce_get_shadow_config(struct ath12k_base *ab,
u32 **shadow_cfg, u32 *shadow_cfg_len)
{
if (!ab->hw_params->supports_shadow_regs)
return;
ath12k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
/* shadow is already configured */
if (*shadow_cfg_len)
return;
/* shadow isn't configured yet, so configure now:
 * non-CE srngs are configured first, then
 * all CE srngs.
*/
ath12k_hal_srng_shadow_config(ab);
ath12k_ce_shadow_config(ab);
/* get the shadow configuration */
ath12k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
}
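/* Initialize the HAL SRNGs backing every allocated pipe and reset their ring
 * indices. The shadow register configuration is fetched first so it can be
 * stored in the QMI CE config for the firmware.
 */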
int ath12k_ce_init_pipes(struct ath12k_base *ab)
{
struct ath12k_ce_pipe *pipe;
int i;
int ret;
ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
&ab->qmi.ce_cfg.shadow_reg_v3_len);
for (i = 0; i < ab->hw_params->ce_count; i++) {
pipe = &ab->ce.ce_pipe[i];
if (pipe->src_ring) {
ret = ath12k_ce_init_ring(ab, pipe->src_ring, i,
HAL_CE_SRC);
if (ret) {
ath12k_warn(ab, "failed to init src ring: %d\n",
ret);
/* Should we clear any partial init */
return ret;
}
pipe->src_ring->write_index = 0;
pipe->src_ring->sw_index = 0;
}
if (pipe->dest_ring) {
ret = ath12k_ce_init_ring(ab, pipe->dest_ring, i,
HAL_CE_DST);
if (ret) {
ath12k_warn(ab, "failed to init dest ring: %d\n",
ret);
/* Should we clear any partial init */
return ret;
}
pipe->rx_buf_needed = pipe->dest_ring->nentries ?
pipe->dest_ring->nentries - 2 : 0;
pipe->dest_ring->write_index = 0;
pipe->dest_ring->sw_index = 0;
}
if (pipe->status_ring) {
ret = ath12k_ce_init_ring(ab, pipe->status_ring, i,
HAL_CE_DST_STATUS);
if (ret) {
ath12k_warn(ab, "failed to init dest status ing: %d\n",
ret);
/* Should we clear any partial init */
return ret;
}
pipe->status_ring->write_index = 0;
pipe->status_ring->sw_index = 0;
}
}
return 0;
}
void ath12k_ce_free_pipes(struct ath12k_base *ab)
{
struct ath12k_ce_pipe *pipe;
int desc_sz;
int i;
for (i = 0; i < ab->hw_params->ce_count; i++) {
pipe = &ab->ce.ce_pipe[i];
if (pipe->src_ring) {
desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
dma_free_coherent(ab->dev,
pipe->src_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
pipe->src_ring->base_addr_owner_space,
pipe->src_ring->base_addr_ce_space);
kfree(pipe->src_ring);
pipe->src_ring = NULL;
}
if (pipe->dest_ring) {
desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
dma_free_coherent(ab->dev,
pipe->dest_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
pipe->dest_ring->base_addr_owner_space,
pipe->dest_ring->base_addr_ce_space);
kfree(pipe->dest_ring);
pipe->dest_ring = NULL;
}
if (pipe->status_ring) {
desc_sz =
ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
dma_free_coherent(ab->dev,
pipe->status_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
pipe->status_ring->base_addr_owner_space,
pipe->status_ring->base_addr_ce_space);
kfree(pipe->status_ring);
pipe->status_ring = NULL;
}
}
}
int ath12k_ce_alloc_pipes(struct ath12k_base *ab)
{
struct ath12k_ce_pipe *pipe;
int i;
int ret;
const struct ce_attr *attr;
spin_lock_init(&ab->ce.ce_lock);
for (i = 0; i < ab->hw_params->ce_count; i++) {
attr = &ab->hw_params->host_ce_config[i];
pipe = &ab->ce.ce_pipe[i];
pipe->pipe_num = i;
pipe->ab = ab;
pipe->buf_sz = attr->src_sz_max;
ret = ath12k_ce_alloc_pipe(ab, i);
if (ret) {
/* Free any partial successful allocation */
ath12k_ce_free_pipes(ab);
return ret;
}
}
return 0;
}
int ath12k_ce_get_attr_flags(struct ath12k_base *ab, int ce_id)
{
if (ce_id >= ab->hw_params->ce_count)
return -EINVAL;
return ab->hw_params->host_ce_config[ce_id].flags;
}
|
linux-master
|
drivers/net/wireless/ath/ath12k/ce.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "dp_mon.h"
#include "debug.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "peer.h"
static void ath12k_dp_mon_rx_handle_ofdma_info(void *rx_tlv,
struct hal_rx_user_status *rx_user_status)
{
struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
(struct hal_rx_ppdu_end_user_stats *)rx_tlv;
rx_user_status->ul_ofdma_user_v0_word0 =
__le32_to_cpu(ppdu_end_user->usr_resp_ref);
rx_user_status->ul_ofdma_user_v0_word1 =
__le32_to_cpu(ppdu_end_user->usr_resp_ref_ext);
}
static void
ath12k_dp_mon_rx_populate_byte_count(void *rx_tlv, void *ppduinfo,
struct hal_rx_user_status *rx_user_status)
{
struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
(struct hal_rx_ppdu_end_user_stats *)rx_tlv;
u32 mpdu_ok_byte_count = __le32_to_cpu(ppdu_end_user->mpdu_ok_cnt);
u32 mpdu_err_byte_count = __le32_to_cpu(ppdu_end_user->mpdu_err_cnt);
rx_user_status->mpdu_ok_byte_count =
u32_get_bits(mpdu_ok_byte_count,
HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_OK_BYTE_COUNT);
rx_user_status->mpdu_err_byte_count =
u32_get_bits(mpdu_err_byte_count,
HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_ERR_BYTE_COUNT);
}
static void
ath12k_dp_mon_rx_populate_mu_user_info(void *rx_tlv,
struct hal_rx_mon_ppdu_info *ppdu_info,
struct hal_rx_user_status *rx_user_status)
{
rx_user_status->ast_index = ppdu_info->ast_index;
rx_user_status->tid = ppdu_info->tid;
rx_user_status->tcp_ack_msdu_count =
ppdu_info->tcp_ack_msdu_count;
rx_user_status->tcp_msdu_count =
ppdu_info->tcp_msdu_count;
rx_user_status->udp_msdu_count =
ppdu_info->udp_msdu_count;
rx_user_status->other_msdu_count =
ppdu_info->other_msdu_count;
rx_user_status->frame_control = ppdu_info->frame_control;
rx_user_status->frame_control_info_valid =
ppdu_info->frame_control_info_valid;
rx_user_status->data_sequence_control_info_valid =
ppdu_info->data_sequence_control_info_valid;
rx_user_status->first_data_seq_ctrl =
ppdu_info->first_data_seq_ctrl;
rx_user_status->preamble_type = ppdu_info->preamble_type;
rx_user_status->ht_flags = ppdu_info->ht_flags;
rx_user_status->vht_flags = ppdu_info->vht_flags;
rx_user_status->he_flags = ppdu_info->he_flags;
rx_user_status->rs_flags = ppdu_info->rs_flags;
rx_user_status->mpdu_cnt_fcs_ok =
ppdu_info->num_mpdu_fcs_ok;
rx_user_status->mpdu_cnt_fcs_err =
ppdu_info->num_mpdu_fcs_err;
memcpy(&rx_user_status->mpdu_fcs_ok_bitmap[0], &ppdu_info->mpdu_fcs_ok_bitmap[0],
HAL_RX_NUM_WORDS_PER_PPDU_BITMAP *
sizeof(ppdu_info->mpdu_fcs_ok_bitmap[0]));
ath12k_dp_mon_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status);
}
static void ath12k_dp_mon_parse_vht_sig_a(u8 *tlv_data,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
struct hal_rx_vht_sig_a_info *vht_sig =
(struct hal_rx_vht_sig_a_info *)tlv_data;
u32 nsts, group_id, info0, info1;
u8 gi_setting;
info0 = __le32_to_cpu(vht_sig->info0);
info1 = __le32_to_cpu(vht_sig->info1);
ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING);
ppdu_info->mcs = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_MCS);
gi_setting = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING);
switch (gi_setting) {
case HAL_RX_VHT_SIG_A_NORMAL_GI:
ppdu_info->gi = HAL_RX_GI_0_8_US;
break;
case HAL_RX_VHT_SIG_A_SHORT_GI:
case HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY:
ppdu_info->gi = HAL_RX_GI_0_4_US;
break;
}
ppdu_info->is_stbc = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_STBC);
nsts = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS);
if (ppdu_info->is_stbc && nsts > 0)
nsts = ((nsts + 1) >> 1) - 1;
ppdu_info->nss = u32_get_bits(nsts, VHT_SIG_SU_NSS_MASK);
ppdu_info->bw = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_BW);
ppdu_info->beamformed = u32_get_bits(info1,
HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED);
group_id = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID);
if (group_id == 0 || group_id == 63)
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
else
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
ppdu_info->vht_flag_values5 = group_id;
ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) |
ppdu_info->nss);
ppdu_info->vht_flag_values2 = ppdu_info->bw;
ppdu_info->vht_flag_values4 =
u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING);
}
static void ath12k_dp_mon_parse_ht_sig(u8 *tlv_data,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
struct hal_rx_ht_sig_info *ht_sig =
(struct hal_rx_ht_sig_info *)tlv_data;
u32 info0 = __le32_to_cpu(ht_sig->info0);
u32 info1 = __le32_to_cpu(ht_sig->info1);
ppdu_info->mcs = u32_get_bits(info0, HAL_RX_HT_SIG_INFO_INFO0_MCS);
ppdu_info->bw = u32_get_bits(info0, HAL_RX_HT_SIG_INFO_INFO0_BW);
ppdu_info->is_stbc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_STBC);
ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING);
ppdu_info->gi = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_GI);
ppdu_info->nss = (ppdu_info->mcs >> 3);
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
}
static void ath12k_dp_mon_parse_l_sig_b(u8 *tlv_data,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
struct hal_rx_lsig_b_info *lsigb =
(struct hal_rx_lsig_b_info *)tlv_data;
u32 info0 = __le32_to_cpu(lsigb->info0);
u8 rate;
rate = u32_get_bits(info0, HAL_RX_LSIG_B_INFO_INFO0_RATE);
switch (rate) {
case 1:
rate = HAL_RX_LEGACY_RATE_1_MBPS;
break;
case 2:
case 5:
rate = HAL_RX_LEGACY_RATE_2_MBPS;
break;
case 3:
case 6:
rate = HAL_RX_LEGACY_RATE_5_5_MBPS;
break;
case 4:
case 7:
rate = HAL_RX_LEGACY_RATE_11_MBPS;
break;
default:
rate = HAL_RX_LEGACY_RATE_INVALID;
}
ppdu_info->rate = rate;
ppdu_info->cck_flag = 1;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
}
static void ath12k_dp_mon_parse_l_sig_a(u8 *tlv_data,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
struct hal_rx_lsig_a_info *lsiga =
(struct hal_rx_lsig_a_info *)tlv_data;
u32 info0 = __le32_to_cpu(lsiga->info0);
u8 rate;
rate = u32_get_bits(info0, HAL_RX_LSIG_A_INFO_INFO0_RATE);
switch (rate) {
case 8:
rate = HAL_RX_LEGACY_RATE_48_MBPS;
break;
case 9:
rate = HAL_RX_LEGACY_RATE_24_MBPS;
break;
case 10:
rate = HAL_RX_LEGACY_RATE_12_MBPS;
break;
case 11:
rate = HAL_RX_LEGACY_RATE_6_MBPS;
break;
case 12:
rate = HAL_RX_LEGACY_RATE_54_MBPS;
break;
case 13:
rate = HAL_RX_LEGACY_RATE_36_MBPS;
break;
case 14:
rate = HAL_RX_LEGACY_RATE_18_MBPS;
break;
case 15:
rate = HAL_RX_LEGACY_RATE_9_MBPS;
break;
default:
rate = HAL_RX_LEGACY_RATE_INVALID;
}
ppdu_info->rate = rate;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
}
static void ath12k_dp_mon_parse_he_sig_b2_ofdma(u8 *tlv_data,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
struct hal_rx_he_sig_b2_ofdma_info *he_sig_b2_ofdma =
(struct hal_rx_he_sig_b2_ofdma_info *)tlv_data;
u32 info0, value;
info0 = __le32_to_cpu(he_sig_b2_ofdma->info0);
ppdu_info->he_data1 |= HE_MCS_KNOWN | HE_DCM_KNOWN | HE_CODING_KNOWN;
/* HE-data2 */
ppdu_info->he_data2 |= HE_TXBF_KNOWN;
ppdu_info->mcs = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS);
value = ppdu_info->mcs << HE_TRANSMIT_MCS_SHIFT;
ppdu_info->he_data3 |= value;
value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM);
value = value << HE_DCM_SHIFT;
ppdu_info->he_data3 |= value;
value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING);
ppdu_info->ldpc = value;
value = value << HE_CODING_SHIFT;
ppdu_info->he_data3 |= value;
/* HE-data4 */
value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID);
value = value << HE_STA_ID_SHIFT;
ppdu_info->he_data4 |= value;
ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS);
ppdu_info->beamformed = u32_get_bits(info0,
HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF);
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
}
static void ath12k_dp_mon_parse_he_sig_b2_mu(u8 *tlv_data,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu =
(struct hal_rx_he_sig_b2_mu_info *)tlv_data;
u32 info0, value;
info0 = __le32_to_cpu(he_sig_b2_mu->info0);
ppdu_info->he_data1 |= HE_MCS_KNOWN | HE_CODING_KNOWN;
ppdu_info->mcs = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS);
value = ppdu_info->mcs << HE_TRANSMIT_MCS_SHIFT;
ppdu_info->he_data3 |= value;
value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING);
ppdu_info->ldpc = value;
value = value << HE_CODING_SHIFT;
ppdu_info->he_data3 |= value;
value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID);
value = value << HE_STA_ID_SHIFT;
ppdu_info->he_data4 |= value;
ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS);
}
static void ath12k_dp_mon_parse_he_sig_b1_mu(u8 *tlv_data,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu =
(struct hal_rx_he_sig_b1_mu_info *)tlv_data;
u32 info0 = __le32_to_cpu(he_sig_b1_mu->info0);
u16 ru_tones;
ru_tones = u32_get_bits(info0,
HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION);
ppdu_info->ru_alloc = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
ppdu_info->he_RU[0] = ru_tones;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
}
static void ath12k_dp_mon_parse_he_sig_mu(u8 *tlv_data,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl =
(struct hal_rx_he_sig_a_mu_dl_info *)tlv_data;
u32 info0, info1, value;
u16 he_gi = 0, he_ltf = 0;
info0 = __le32_to_cpu(he_sig_a_mu_dl->info0);
info1 = __le32_to_cpu(he_sig_a_mu_dl->info1);
ppdu_info->he_mu_flags = 1;
ppdu_info->he_data1 = HE_MU_FORMAT_TYPE;
ppdu_info->he_data1 |=
HE_BSS_COLOR_KNOWN |
HE_DL_UL_KNOWN |
HE_LDPC_EXTRA_SYMBOL_KNOWN |
HE_STBC_KNOWN |
HE_DATA_BW_RU_KNOWN |
HE_DOPPLER_KNOWN;
ppdu_info->he_data2 =
HE_GI_KNOWN |
HE_LTF_SYMBOLS_KNOWN |
HE_PRE_FEC_PADDING_KNOWN |
HE_PE_DISAMBIGUITY_KNOWN |
HE_TXOP_KNOWN |
HE_MIDABLE_PERIODICITY_KNOWN;
/* data3 */
ppdu_info->he_data3 = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_BSS_COLOR);
value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_UL_FLAG);
value = value << HE_DL_UL_SHIFT;
ppdu_info->he_data3 |= value;
value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_LDPC_EXTRA);
value = value << HE_LDPC_EXTRA_SYMBOL_SHIFT;
ppdu_info->he_data3 |= value;
value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC);
value = value << HE_STBC_SHIFT;
ppdu_info->he_data3 |= value;
/* data4 */
ppdu_info->he_data4 = u32_get_bits(info0,
HAL_RX_HE_SIG_A_MU_DL_INFO0_SPATIAL_REUSE);
/* data5 */
value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_TRANSMIT_BW);
ppdu_info->he_data5 = value;
ppdu_info->bw = value;
value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_CP_LTF_SIZE);
switch (value) {
case 0:
he_gi = HE_GI_0_8;
he_ltf = HE_LTF_4_X;
break;
case 1:
he_gi = HE_GI_0_8;
he_ltf = HE_LTF_2_X;
break;
case 2:
he_gi = HE_GI_1_6;
he_ltf = HE_LTF_2_X;
break;
case 3:
he_gi = HE_GI_3_2;
he_ltf = HE_LTF_4_X;
break;
}
ppdu_info->gi = he_gi;
value = he_gi << HE_GI_SHIFT;
ppdu_info->he_data5 |= value;
value = he_ltf << HE_LTF_SIZE_SHIFT;
ppdu_info->he_data5 |= value;
value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_NUM_LTF_SYMB);
value = (value << HE_LTF_SYM_SHIFT);
ppdu_info->he_data5 |= value;
value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_FACTOR);
value = value << HE_PRE_FEC_PAD_SHIFT;
ppdu_info->he_data5 |= value;
value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_PE_DISAM);
value = value << HE_PE_DISAMBIGUITY_SHIFT;
ppdu_info->he_data5 |= value;
/*data6*/
value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_DOPPLER_INDICATION);
value = value << HE_DOPPLER_SHIFT;
ppdu_info->he_data6 |= value;
value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_TXOP_DURATION);
value = value << HE_TXOP_SHIFT;
ppdu_info->he_data6 |= value;
/* HE-MU Flags */
/* HE-MU-flags1 */
ppdu_info->he_flags1 =
HE_SIG_B_MCS_KNOWN |
HE_SIG_B_DCM_KNOWN |
HE_SIG_B_COMPRESSION_FLAG_1_KNOWN |
HE_SIG_B_SYM_NUM_KNOWN |
HE_RU_0_KNOWN;
value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_MCS_OF_SIGB);
ppdu_info->he_flags1 |= value;
value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_DCM_OF_SIGB);
value = value << HE_DCM_FLAG_1_SHIFT;
ppdu_info->he_flags1 |= value;
/* HE-MU-flags2 */
ppdu_info->he_flags2 = HE_BW_KNOWN;
value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_TRANSMIT_BW);
ppdu_info->he_flags2 |= value;
value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_COMP_MODE_SIGB);
value = value << HE_SIG_B_COMPRESSION_FLAG_2_SHIFT;
ppdu_info->he_flags2 |= value;
value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_NUM_SIGB_SYMB);
value = value - 1;
value = value << HE_NUM_SIG_B_SYMBOLS_SHIFT;
ppdu_info->he_flags2 |= value;
ppdu_info->is_stbc = info1 &
HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
}
static void ath12k_dp_mon_parse_he_sig_su(u8 *tlv_data,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
struct hal_rx_he_sig_a_su_info *he_sig_a =
(struct hal_rx_he_sig_a_su_info *)tlv_data;
u32 info0, info1, value;
u32 dcm;
u8 he_dcm = 0, he_stbc = 0;
u16 he_gi = 0, he_ltf = 0;
ppdu_info->he_flags = 1;
info0 = __le32_to_cpu(he_sig_a->info0);
info1 = __le32_to_cpu(he_sig_a->info1);
value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND);
if (value == 0)
ppdu_info->he_data1 = HE_TRIG_FORMAT_TYPE;
else
ppdu_info->he_data1 = HE_SU_FORMAT_TYPE;
ppdu_info->he_data1 |=
HE_BSS_COLOR_KNOWN |
HE_BEAM_CHANGE_KNOWN |
HE_DL_UL_KNOWN |
HE_MCS_KNOWN |
HE_DCM_KNOWN |
HE_CODING_KNOWN |
HE_LDPC_EXTRA_SYMBOL_KNOWN |
HE_STBC_KNOWN |
HE_DATA_BW_RU_KNOWN |
HE_DOPPLER_KNOWN;
ppdu_info->he_data2 |=
HE_GI_KNOWN |
HE_TXBF_KNOWN |
HE_PE_DISAMBIGUITY_KNOWN |
HE_TXOP_KNOWN |
HE_LTF_SYMBOLS_KNOWN |
HE_PRE_FEC_PADDING_KNOWN |
HE_MIDABLE_PERIODICITY_KNOWN;
ppdu_info->he_data3 = u32_get_bits(info0,
HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR);
value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE);
value = value << HE_BEAM_CHANGE_SHIFT;
ppdu_info->he_data3 |= value;
value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG);
value = value << HE_DL_UL_SHIFT;
ppdu_info->he_data3 |= value;
value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS);
ppdu_info->mcs = value;
value = value << HE_TRANSMIT_MCS_SHIFT;
ppdu_info->he_data3 |= value;
value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM);
he_dcm = value;
value = value << HE_DCM_SHIFT;
ppdu_info->he_data3 |= value;
value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING);
value = value << HE_CODING_SHIFT;
ppdu_info->he_data3 |= value;
value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA);
value = value << HE_LDPC_EXTRA_SYMBOL_SHIFT;
ppdu_info->he_data3 |= value;
value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC);
he_stbc = value;
value = value << HE_STBC_SHIFT;
ppdu_info->he_data3 |= value;
/* data4 */
ppdu_info->he_data4 = u32_get_bits(info0,
HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE);
/* data5 */
value = u32_get_bits(info0,
HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW);
ppdu_info->he_data5 = value;
ppdu_info->bw = value;
value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE);
switch (value) {
case 0:
he_gi = HE_GI_0_8;
he_ltf = HE_LTF_1_X;
break;
case 1:
he_gi = HE_GI_0_8;
he_ltf = HE_LTF_2_X;
break;
case 2:
he_gi = HE_GI_1_6;
he_ltf = HE_LTF_2_X;
break;
case 3:
if (he_dcm && he_stbc) {
he_gi = HE_GI_0_8;
he_ltf = HE_LTF_4_X;
} else {
he_gi = HE_GI_3_2;
he_ltf = HE_LTF_4_X;
}
break;
}
ppdu_info->gi = he_gi;
value = he_gi << HE_GI_SHIFT;
ppdu_info->he_data5 |= value;
value = he_ltf << HE_LTF_SIZE_SHIFT;
ppdu_info->ltf_size = he_ltf;
ppdu_info->he_data5 |= value;
value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS);
value = (value << HE_LTF_SYM_SHIFT);
ppdu_info->he_data5 |= value;
value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR);
value = value << HE_PRE_FEC_PAD_SHIFT;
ppdu_info->he_data5 |= value;
value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF);
value = value << HE_TXBF_SHIFT;
ppdu_info->he_data5 |= value;
value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM);
value = value << HE_PE_DISAMBIGUITY_SHIFT;
ppdu_info->he_data5 |= value;
/* data6 */
value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS);
value++;
ppdu_info->he_data6 = value;
value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND);
value = value << HE_DOPPLER_SHIFT;
ppdu_info->he_data6 |= value;
value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION);
value = value << HE_TXOP_SHIFT;
ppdu_info->he_data6 |= value;
ppdu_info->mcs =
u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS);
ppdu_info->bw =
u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW);
ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING);
ppdu_info->is_stbc = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC);
ppdu_info->beamformed = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF);
dcm = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM);
ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS);
ppdu_info->dcm = dcm;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
}
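/* Dispatch one rx monitor status TLV into the per-PPDU info. The return
 * value tells the caller whether the PPDU is complete, so it can keep
 * walking the TLV stream until PPDU_DONE or the buffer ends.
 */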
static enum hal_rx_mon_status
ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
struct ath12k_mon_data *pmon,
u32 tlv_tag, u8 *tlv_data, u32 userid)
{
struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
u32 info[7];
switch (tlv_tag) {
case HAL_RX_PPDU_START: {
struct hal_rx_ppdu_start *ppdu_start =
(struct hal_rx_ppdu_start *)tlv_data;
info[0] = __le32_to_cpu(ppdu_start->info0);
ppdu_info->ppdu_id =
u32_get_bits(info[0], HAL_RX_PPDU_START_INFO0_PPDU_ID);
ppdu_info->chan_num = __le32_to_cpu(ppdu_start->chan_num);
ppdu_info->ppdu_ts = __le32_to_cpu(ppdu_start->ppdu_start_ts);
if (ppdu_info->ppdu_id != ppdu_info->last_ppdu_id) {
ppdu_info->last_ppdu_id = ppdu_info->ppdu_id;
ppdu_info->num_users = 0;
memset(&ppdu_info->mpdu_fcs_ok_bitmap, 0,
HAL_RX_NUM_WORDS_PER_PPDU_BITMAP *
sizeof(ppdu_info->mpdu_fcs_ok_bitmap[0]));
}
break;
}
case HAL_RX_PPDU_END_USER_STATS: {
struct hal_rx_ppdu_end_user_stats *eu_stats =
(struct hal_rx_ppdu_end_user_stats *)tlv_data;
info[0] = __le32_to_cpu(eu_stats->info0);
info[1] = __le32_to_cpu(eu_stats->info1);
info[2] = __le32_to_cpu(eu_stats->info2);
info[4] = __le32_to_cpu(eu_stats->info4);
info[5] = __le32_to_cpu(eu_stats->info5);
info[6] = __le32_to_cpu(eu_stats->info6);
ppdu_info->ast_index =
u32_get_bits(info[2], HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX);
ppdu_info->fc_valid =
u32_get_bits(info[1], HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID);
ppdu_info->tid =
ffs(u32_get_bits(info[6],
HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP)
- 1);
ppdu_info->tcp_msdu_count =
u32_get_bits(info[4],
HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT);
ppdu_info->udp_msdu_count =
u32_get_bits(info[4],
HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT);
ppdu_info->other_msdu_count =
u32_get_bits(info[5],
HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT);
ppdu_info->tcp_ack_msdu_count =
u32_get_bits(info[5],
HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT);
ppdu_info->preamble_type =
u32_get_bits(info[1],
HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE);
ppdu_info->num_mpdu_fcs_ok =
u32_get_bits(info[1],
HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK);
ppdu_info->num_mpdu_fcs_err =
u32_get_bits(info[0],
HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR);
switch (ppdu_info->preamble_type) {
case HAL_RX_PREAMBLE_11N:
ppdu_info->ht_flags = 1;
break;
case HAL_RX_PREAMBLE_11AC:
ppdu_info->vht_flags = 1;
break;
case HAL_RX_PREAMBLE_11AX:
ppdu_info->he_flags = 1;
break;
default:
break;
}
if (userid < HAL_MAX_UL_MU_USERS) {
struct hal_rx_user_status *rxuser_stats =
&ppdu_info->userstats[userid];
ppdu_info->num_users += 1;
ath12k_dp_mon_rx_handle_ofdma_info(tlv_data, rxuser_stats);
ath12k_dp_mon_rx_populate_mu_user_info(tlv_data, ppdu_info,
rxuser_stats);
}
ppdu_info->mpdu_fcs_ok_bitmap[0] = __le32_to_cpu(eu_stats->rsvd1[0]);
ppdu_info->mpdu_fcs_ok_bitmap[1] = __le32_to_cpu(eu_stats->rsvd1[1]);
break;
}
case HAL_RX_PPDU_END_USER_STATS_EXT: {
struct hal_rx_ppdu_end_user_stats_ext *eu_stats =
(struct hal_rx_ppdu_end_user_stats_ext *)tlv_data;
ppdu_info->mpdu_fcs_ok_bitmap[2] = __le32_to_cpu(eu_stats->info1);
ppdu_info->mpdu_fcs_ok_bitmap[3] = __le32_to_cpu(eu_stats->info2);
ppdu_info->mpdu_fcs_ok_bitmap[4] = __le32_to_cpu(eu_stats->info3);
ppdu_info->mpdu_fcs_ok_bitmap[5] = __le32_to_cpu(eu_stats->info4);
ppdu_info->mpdu_fcs_ok_bitmap[6] = __le32_to_cpu(eu_stats->info5);
ppdu_info->mpdu_fcs_ok_bitmap[7] = __le32_to_cpu(eu_stats->info6);
break;
}
case HAL_PHYRX_HT_SIG:
ath12k_dp_mon_parse_ht_sig(tlv_data, ppdu_info);
break;
case HAL_PHYRX_L_SIG_B:
ath12k_dp_mon_parse_l_sig_b(tlv_data, ppdu_info);
break;
case HAL_PHYRX_L_SIG_A:
ath12k_dp_mon_parse_l_sig_a(tlv_data, ppdu_info);
break;
case HAL_PHYRX_VHT_SIG_A:
ath12k_dp_mon_parse_vht_sig_a(tlv_data, ppdu_info);
break;
case HAL_PHYRX_HE_SIG_A_SU:
ath12k_dp_mon_parse_he_sig_su(tlv_data, ppdu_info);
break;
case HAL_PHYRX_HE_SIG_A_MU_DL:
ath12k_dp_mon_parse_he_sig_mu(tlv_data, ppdu_info);
break;
case HAL_PHYRX_HE_SIG_B1_MU:
ath12k_dp_mon_parse_he_sig_b1_mu(tlv_data, ppdu_info);
break;
case HAL_PHYRX_HE_SIG_B2_MU:
ath12k_dp_mon_parse_he_sig_b2_mu(tlv_data, ppdu_info);
break;
case HAL_PHYRX_HE_SIG_B2_OFDMA:
ath12k_dp_mon_parse_he_sig_b2_ofdma(tlv_data, ppdu_info);
break;
case HAL_PHYRX_RSSI_LEGACY: {
struct hal_rx_phyrx_rssi_legacy_info *rssi =
(struct hal_rx_phyrx_rssi_legacy_info *)tlv_data;
u32 reception_type = 0;
u32 rssi_legacy_info = __le32_to_cpu(rssi->rsvd[0]);
info[0] = __le32_to_cpu(rssi->info0);
/* TODO: Note that the combined RSSI will not be accurate in
 * the MU case; RSSI in MU needs to be retrieved from the
 * PHYRX_OTHER_RECEIVE_INFO TLV.
*/
ppdu_info->rssi_comb =
u32_get_bits(info[0],
HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB);
reception_type =
u32_get_bits(rssi_legacy_info,
HAL_RX_PHYRX_RSSI_LEGACY_INFO_RSVD1_RECEPTION);
switch (reception_type) {
case HAL_RECEPTION_TYPE_ULOFMDA:
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
break;
case HAL_RECEPTION_TYPE_ULMIMO:
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
break;
default:
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
break;
}
break;
}
case HAL_RXPCU_PPDU_END_INFO: {
struct hal_rx_ppdu_end_duration *ppdu_rx_duration =
(struct hal_rx_ppdu_end_duration *)tlv_data;
info[0] = __le32_to_cpu(ppdu_rx_duration->info0);
ppdu_info->rx_duration =
u32_get_bits(info[0], HAL_RX_PPDU_END_DURATION);
ppdu_info->tsft = __le32_to_cpu(ppdu_rx_duration->rsvd0[1]);
ppdu_info->tsft = (ppdu_info->tsft << 32) |
__le32_to_cpu(ppdu_rx_duration->rsvd0[0]);
break;
}
case HAL_RX_MPDU_START: {
struct hal_rx_mpdu_start *mpdu_start =
(struct hal_rx_mpdu_start *)tlv_data;
struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
u16 peer_id;
info[1] = __le32_to_cpu(mpdu_start->info1);
peer_id = u32_get_bits(info[1], HAL_RX_MPDU_START_INFO1_PEERID);
if (peer_id)
ppdu_info->peer_id = peer_id;
ppdu_info->mpdu_len += u32_get_bits(info[1],
HAL_RX_MPDU_START_INFO2_MPDU_LEN);
if (userid < HAL_MAX_UL_MU_USERS) {
info[0] = __le32_to_cpu(mpdu_start->info0);
ppdu_info->userid = userid;
ppdu_info->ampdu_id[userid] =
u32_get_bits(info[0], HAL_RX_MPDU_START_INFO1_PEERID);
}
mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
if (!mon_mpdu)
return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
/* track the new MPDU so the HAL_MON_BUF_ADDR and HAL_RX_MSDU_END
 * handlers can attach msdus to it
 */
pmon->mon_mpdu = mon_mpdu;
break;
}
case HAL_RX_MSDU_START:
/* TODO: add msdu start parsing logic */
break;
case HAL_MON_BUF_ADDR: {
struct dp_rxdma_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring;
struct dp_mon_packet_info *packet_info =
(struct dp_mon_packet_info *)tlv_data;
int buf_id = u32_get_bits(packet_info->cookie,
DP_RXDMA_BUF_COOKIE_BUF_ID);
struct sk_buff *msdu;
struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
struct ath12k_skb_rxcb *rxcb;
spin_lock_bh(&buf_ring->idr_lock);
msdu = idr_remove(&buf_ring->bufs_idr, buf_id);
spin_unlock_bh(&buf_ring->idr_lock);
if (unlikely(!msdu)) {
ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
buf_id);
return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
}
rxcb = ATH12K_SKB_RXCB(msdu);
dma_unmap_single(ab->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
if (mon_mpdu->tail)
mon_mpdu->tail->next = msdu;
else
mon_mpdu->tail = msdu;
ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
break;
}
case HAL_RX_MSDU_END: {
struct rx_msdu_end_qcn9274 *msdu_end =
(struct rx_msdu_end_qcn9274 *)tlv_data;
bool is_first_msdu_in_mpdu;
u16 msdu_end_info;
msdu_end_info = __le16_to_cpu(msdu_end->info5);
is_first_msdu_in_mpdu = u32_get_bits(msdu_end_info,
RX_MSDU_END_INFO5_FIRST_MSDU);
if (is_first_msdu_in_mpdu) {
pmon->mon_mpdu->head = pmon->mon_mpdu->tail;
pmon->mon_mpdu->tail = NULL;
}
break;
}
case HAL_RX_MPDU_END:
list_add_tail(&pmon->mon_mpdu->list, &pmon->dp_rx_mon_mpdu_list);
break;
case HAL_DUMMY:
return HAL_RX_MON_STATUS_BUF_DONE;
case HAL_RX_PPDU_END_STATUS_DONE:
case 0:
return HAL_RX_MON_STATUS_PPDU_DONE;
default:
break;
}
return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
}
static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar, struct sk_buff *msdu)
{
u32 rx_pkt_offset, l2_hdr_offset;
rx_pkt_offset = ar->ab->hw_params->hal_desc_sz;
l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab,
(struct hal_rx_desc *)msdu->data);
skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
}
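/* Stitch the msdus of one MPDU into a frame that can be handed to mac80211.
 * Raw decap only needs the payload offsets adjusted and the FCS trimmed;
 * native-wifi decap additionally restores the QoS header in front of each
 * msdu of a QoS frame. Other decap formats are rejected.
 */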
static struct sk_buff *
ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
u32 mac_id, struct sk_buff *head_msdu,
struct ieee80211_rx_status *rxs, bool *fcs_err)
{
struct ath12k_base *ab = ar->ab;
struct sk_buff *msdu, *mpdu_buf, *prev_buf;
struct hal_rx_desc *rx_desc;
u8 *hdr_desc, *dest, decap_format;
struct ieee80211_hdr_3addr *wh;
u32 err_bitmap;
mpdu_buf = NULL;
if (!head_msdu)
goto err_merge_fail;
rx_desc = (struct hal_rx_desc *)head_msdu->data;
err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
*fcs_err = true;
decap_format = ath12k_dp_rx_h_decap_type(ab, rx_desc);
ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
if (decap_format == DP_RX_DECAP_TYPE_RAW) {
ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu);
prev_buf = head_msdu;
msdu = head_msdu->next;
while (msdu) {
ath12k_dp_mon_rx_msdus_set_payload(ar, msdu);
prev_buf = msdu;
msdu = msdu->next;
}
prev_buf->next = NULL;
skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
u8 qos_pkt = 0;
rx_desc = (struct hal_rx_desc *)head_msdu->data;
hdr_desc = ab->hw_params->hal_ops->rx_desc_get_msdu_payload(rx_desc);
/* Base size */
wh = (struct ieee80211_hdr_3addr *)hdr_desc;
if (ieee80211_is_data_qos(wh->frame_control))
qos_pkt = 1;
msdu = head_msdu;
while (msdu) {
ath12k_dp_mon_rx_msdus_set_payload(ar, msdu);
if (qos_pkt) {
dest = skb_push(msdu, sizeof(__le16));
if (!dest)
goto err_merge_fail;
memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
}
prev_buf = msdu;
msdu = msdu->next;
}
dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
if (!dest)
goto err_merge_fail;
ath12k_dbg(ab, ATH12K_DBG_DATA,
"mpdu_buf %pK mpdu_buf->len %u",
prev_buf, prev_buf->len);
} else {
ath12k_dbg(ab, ATH12K_DBG_DATA,
"decap format %d is not supported!\n",
decap_format);
goto err_merge_fail;
}
return head_msdu;
err_merge_fail:
if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) {
ath12k_dbg(ab, ATH12K_DBG_DATA,
"err_merge_fail mpdu_buf %pK", mpdu_buf);
/* Free the head buffer */
dev_kfree_skb_any(mpdu_buf);
}
return NULL;
}
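/* Serialize the accumulated HE fields into the radiotap HE header layout:
 * six little-endian 16-bit data words.
 */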
static void
ath12k_dp_mon_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
u8 *rtap_buf)
{
u32 rtap_len = 0;
put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
}
static void
ath12k_dp_mon_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
u8 *rtap_buf)
{
u32 rtap_len = 0;
put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
rtap_len += 2;
rtap_buf[rtap_len] = rx_status->he_RU[0];
rtap_len += 1;
rtap_buf[rtap_len] = rx_status->he_RU[1];
rtap_len += 1;
rtap_buf[rtap_len] = rx_status->he_RU[2];
rtap_len += 1;
rtap_buf[rtap_len] = rx_status->he_RU[3];
}
static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
struct hal_rx_mon_ppdu_info *ppduinfo,
struct sk_buff *mon_skb,
struct ieee80211_rx_status *rxs)
{
struct ieee80211_supported_band *sband;
u8 *ptr = NULL;
u16 ampdu_id = ppduinfo->ampdu_id[ppduinfo->userid];
rxs->flag |= RX_FLAG_MACTIME_START;
rxs->signal = ppduinfo->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR;
rxs->nss = ppduinfo->nss + 1;
if (ampdu_id) {
rxs->flag |= RX_FLAG_AMPDU_DETAILS;
rxs->ampdu_reference = ampdu_id;
}
if (ppduinfo->he_mu_flags) {
rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
rxs->encoding = RX_ENC_HE;
ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
ath12k_dp_mon_rx_update_radiotap_he_mu(ppduinfo, ptr);
} else if (ppduinfo->he_flags) {
rxs->flag |= RX_FLAG_RADIOTAP_HE;
rxs->encoding = RX_ENC_HE;
ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
ath12k_dp_mon_rx_update_radiotap_he(ppduinfo, ptr);
rxs->rate_idx = ppduinfo->rate;
} else if (ppduinfo->vht_flags) {
rxs->encoding = RX_ENC_VHT;
rxs->rate_idx = ppduinfo->rate;
} else if (ppduinfo->ht_flags) {
rxs->encoding = RX_ENC_HT;
rxs->rate_idx = ppduinfo->rate;
} else {
rxs->encoding = RX_ENC_LEGACY;
sband = &ar->mac.sbands[rxs->band];
rxs->rate_idx = ath12k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
ppduinfo->cck_flag);
}
rxs->mactime = ppduinfo->tsft;
}
static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
struct sk_buff *msdu,
struct ieee80211_rx_status *status)
{
static const struct ieee80211_radiotap_he known = {
.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
};
struct ieee80211_rx_status *rx_status;
struct ieee80211_radiotap_he *he = NULL;
struct ieee80211_sta *pubsta = NULL;
struct ath12k_peer *peer;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
u8 decap = DP_RX_DECAP_TYPE_RAW;
bool is_mcbc = rxcb->is_mcbc;
bool is_eapol_tkip = rxcb->is_eapol;
if ((status->encoding == RX_ENC_HE) && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
!(status->flag & RX_FLAG_SKIP_MONITOR)) {
he = skb_push(msdu, sizeof(known));
memcpy(he, &known, sizeof(known));
status->flag |= RX_FLAG_RADIOTAP_HE;
}
if (!(status->flag & RX_FLAG_ONLY_MONITOR))
decap = ath12k_dp_rx_h_decap_type(ar->ab, rxcb->rx_desc);
spin_lock_bh(&ar->ab->base_lock);
peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
if (peer && peer->sta)
pubsta = peer->sta;
spin_unlock_bh(&ar->ab->base_lock);
ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
"rx skb %pK len %u peer %pM %u %s %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
peer ? peer->addr : NULL,
rxcb->tid,
(is_mcbc) ? "mcast" : "ucast",
(status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
(status->encoding == RX_ENC_HT) ? "ht" : "",
(status->encoding == RX_ENC_VHT) ? "vht" : "",
(status->encoding == RX_ENC_HE) ? "he" : "",
(status->bw == RATE_INFO_BW_40) ? "40" : "",
(status->bw == RATE_INFO_BW_80) ? "80" : "",
(status->bw == RATE_INFO_BW_160) ? "160" : "",
status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
status->rate_idx,
status->nss,
status->freq,
status->band, status->flag,
!!(status->flag & RX_FLAG_FAILED_FCS_CRC),
!!(status->flag & RX_FLAG_MMIC_ERROR),
!!(status->flag & RX_FLAG_AMSDU_MORE));
ath12k_dbg_dump(ar->ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
msdu->data, msdu->len);
rx_status = IEEE80211_SKB_RXCB(msdu);
*rx_status = *status;
/* TODO: trace rx packet */
/* The PN for multicast packets is not validated in HW,
 * so skip the 802.3 rx path.
 * Also, fast_rx expects the STA to be authorized, hence
 * EAPOL packets are sent in the slow path.
*/
if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol_tkip &&
!(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
rx_status->flag |= RX_FLAG_8023;
ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
}
static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id,
struct sk_buff *head_msdu,
struct hal_rx_mon_ppdu_info *ppduinfo,
struct napi_struct *napi)
{
struct ath12k_pdev_dp *dp = &ar->dp;
struct sk_buff *mon_skb, *skb_next, *header;
struct ieee80211_rx_status *rxs = &dp->rx_status;
bool fcs_err = false;
mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mac_id, head_msdu,
rxs, &fcs_err);
if (!mon_skb)
goto mon_deliver_fail;
header = mon_skb;
rxs->flag = 0;
if (fcs_err)
rxs->flag = RX_FLAG_FAILED_FCS_CRC;
do {
skb_next = mon_skb->next;
if (!skb_next)
rxs->flag &= ~RX_FLAG_AMSDU_MORE;
else
rxs->flag |= RX_FLAG_AMSDU_MORE;
if (mon_skb == header) {
header = NULL;
rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
} else {
rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
}
rxs->flag |= RX_FLAG_ONLY_MONITOR;
ath12k_dp_mon_update_radiotap(ar, ppduinfo, mon_skb, rxs);
ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, rxs);
mon_skb = skb_next;
} while (mon_skb);
rxs->flag = 0;
return 0;
mon_deliver_fail:
mon_skb = head_msdu;
while (mon_skb) {
skb_next = mon_skb->next;
dev_kfree_skb_any(mon_skb);
mon_skb = skb_next;
}
return -EINVAL;
}
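/* Walk the TLV stream in a monitor status buffer, feeding every TLV to the
 * status parser until it signals a complete PPDU or the buffer is exhausted.
 */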
static enum hal_rx_mon_status
ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon,
struct sk_buff *skb)
{
struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
struct hal_tlv_hdr *tlv;
enum hal_rx_mon_status hal_status;
u32 tlv_userid = 0;
u16 tlv_tag, tlv_len;
u8 *ptr = skb->data;
memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info));
do {
tlv = (struct hal_tlv_hdr *)ptr;
tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG);
tlv_len = le32_get_bits(tlv->tl, HAL_TLV_HDR_LEN);
tlv_userid = le32_get_bits(tlv->tl, HAL_TLV_USR_ID);
ptr += sizeof(*tlv);
/* The actual length of PPDU_END is the combined length of many PHY
* TLVs that follow. Skip the TLV header and
* rx_rxpcu_classification_overview that follows the header to get to
* the next TLV.
*/
if (tlv_tag == HAL_RX_PPDU_END)
tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
hal_status = ath12k_dp_mon_rx_parse_status_tlv(ab, pmon,
tlv_tag, ptr, tlv_userid);
ptr += tlv_len;
ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE)
break;
} while (hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE);
return hal_status;
}
enum hal_rx_mon_status
ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
struct ath12k_mon_data *pmon,
int mac_id,
struct sk_buff *skb,
struct napi_struct *napi)
{
struct ath12k_base *ab = ar->ab;
struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
struct dp_mon_mpdu *tmp;
struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
struct sk_buff *head_msdu, *tail_msdu;
enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
ath12k_dp_mon_parse_rx_dest(ab, pmon, skb);
list_for_each_entry_safe(mon_mpdu, tmp, &pmon->dp_rx_mon_mpdu_list, list) {
list_del(&mon_mpdu->list);
head_msdu = mon_mpdu->head;
tail_msdu = mon_mpdu->tail;
if (head_msdu && tail_msdu) {
ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu,
ppdu_info, napi);
}
kfree(mon_mpdu);
}
return hal_status;
}
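/* Refill the monitor buffer ring with req_entries freshly mapped skbs. Each
 * skb is tracked in the IDR and its id is encoded into the descriptor
 * cookie, which is how HAL_MON_BUF_ADDR TLV processing finds the skb again.
 */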
int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab,
struct dp_rxdma_ring *buf_ring,
int req_entries)
{
struct hal_mon_buf_ring *mon_buf;
struct sk_buff *skb;
struct hal_srng *srng;
dma_addr_t paddr;
u32 cookie;
int buf_id;
srng = &ab->hal.srng_list[buf_ring->refill_buf_ring.ring_id];
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
while (req_entries > 0) {
skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + DP_RX_BUFFER_ALIGN_SIZE);
if (unlikely(!skb))
goto fail_alloc_skb;
if (!IS_ALIGNED((unsigned long)skb->data, DP_RX_BUFFER_ALIGN_SIZE)) {
skb_pull(skb,
PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
skb->data);
}
paddr = dma_map_single(ab->dev, skb->data,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(ab->dev, paddr)))
goto fail_free_skb;
spin_lock_bh(&buf_ring->idr_lock);
buf_id = idr_alloc(&buf_ring->bufs_idr, skb, 0,
buf_ring->bufs_max * 3, GFP_ATOMIC);
spin_unlock_bh(&buf_ring->idr_lock);
if (unlikely(buf_id < 0))
goto fail_dma_unmap;
mon_buf = ath12k_hal_srng_src_get_next_entry(ab, srng);
if (unlikely(!mon_buf))
goto fail_idr_remove;
ATH12K_SKB_RXCB(skb)->paddr = paddr;
cookie = u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
mon_buf->paddr_lo = cpu_to_le32(lower_32_bits(paddr));
mon_buf->paddr_hi = cpu_to_le32(upper_32_bits(paddr));
mon_buf->cookie = cpu_to_le64(cookie);
req_entries--;
}
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return 0;
fail_idr_remove:
spin_lock_bh(&buf_ring->idr_lock);
idr_remove(&buf_ring->bufs_idr, buf_id);
spin_unlock_bh(&buf_ring->idr_lock);
fail_dma_unmap:
dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
fail_free_skb:
dev_kfree_skb_any(skb);
fail_alloc_skb:
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return -ENOMEM;
}
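/* Return the cached tx PPDU info of the requested type if it has not been
 * used yet; otherwise free it and allocate a fresh one for ppdu_id.
 */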
static struct dp_mon_tx_ppdu_info *
ath12k_dp_mon_tx_get_ppdu_info(struct ath12k_mon_data *pmon,
unsigned int ppdu_id,
enum dp_mon_tx_ppdu_info_type type)
{
struct dp_mon_tx_ppdu_info *tx_ppdu_info;
if (type == DP_MON_TX_PROT_PPDU_INFO) {
tx_ppdu_info = pmon->tx_prot_ppdu_info;
if (tx_ppdu_info && !tx_ppdu_info->is_used)
return tx_ppdu_info;
kfree(tx_ppdu_info);
} else {
tx_ppdu_info = pmon->tx_data_ppdu_info;
if (tx_ppdu_info && !tx_ppdu_info->is_used)
return tx_ppdu_info;
kfree(tx_ppdu_info);
}
/* allocate new tx_ppdu_info */
tx_ppdu_info = kzalloc(sizeof(*tx_ppdu_info), GFP_ATOMIC);
if (!tx_ppdu_info)
return NULL;
tx_ppdu_info->is_used = 0;
tx_ppdu_info->ppdu_id = ppdu_id;
if (type == DP_MON_TX_PROT_PPDU_INFO)
pmon->tx_prot_ppdu_info = tx_ppdu_info;
else
pmon->tx_data_ppdu_info = tx_ppdu_info;
return tx_ppdu_info;
}
static struct dp_mon_tx_ppdu_info *
ath12k_dp_mon_hal_tx_ppdu_info(struct ath12k_mon_data *pmon,
u16 tlv_tag)
{
switch (tlv_tag) {
case HAL_TX_FES_SETUP:
case HAL_TX_FLUSH:
case HAL_PCU_PPDU_SETUP_INIT:
case HAL_TX_PEER_ENTRY:
case HAL_TX_QUEUE_EXTENSION:
case HAL_TX_MPDU_START:
case HAL_TX_MSDU_START:
case HAL_TX_DATA:
case HAL_MON_BUF_ADDR:
case HAL_TX_MPDU_END:
case HAL_TX_LAST_MPDU_FETCHED:
case HAL_TX_LAST_MPDU_END:
case HAL_COEX_TX_REQ:
case HAL_TX_RAW_OR_NATIVE_FRAME_SETUP:
case HAL_SCH_CRITICAL_TLV_REFERENCE:
case HAL_TX_FES_SETUP_COMPLETE:
case HAL_TQM_MPDU_GLOBAL_START:
case HAL_SCHEDULER_END:
case HAL_TX_FES_STATUS_USER_PPDU:
break;
case HAL_TX_FES_STATUS_PROT: {
if (!pmon->tx_prot_ppdu_info->is_used)
pmon->tx_prot_ppdu_info->is_used = true;
return pmon->tx_prot_ppdu_info;
}
}
if (!pmon->tx_data_ppdu_info->is_used)
pmon->tx_data_ppdu_info->is_used = true;
return pmon->tx_data_ppdu_info;
}
#define MAX_MONITOR_HEADER 512
#define MAX_DUMMY_FRM_BODY 128
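/* Allocate an skb for a synthesized monitor frame, with headroom reserved
 * for the radiotap header and the data pointer aligned to 4 bytes.
 */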
struct sk_buff *ath12k_dp_mon_tx_alloc_skb(void)
{
struct sk_buff *skb;
skb = dev_alloc_skb(MAX_MONITOR_HEADER + MAX_DUMMY_FRM_BODY);
if (!skb)
return NULL;
skb_reserve(skb, MAX_MONITOR_HEADER);
if (!IS_ALIGNED((unsigned long)skb->data, 4))
skb_pull(skb, PTR_ALIGN(skb->data, 4) - skb->data);
return skb;
}
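/* The generators below synthesize minimal 802.11 frames (CTS-to-self, RTS,
 * QoS-null and ACK) from tx status TLVs, so the monitor path can report
 * protection and response exchanges the hardware never delivers as full
 * MPDUs.
 */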
static int
ath12k_dp_mon_tx_gen_cts2self_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
{
struct sk_buff *skb;
struct ieee80211_cts *cts;
skb = ath12k_dp_mon_tx_alloc_skb();
if (!skb)
return -ENOMEM;
cts = (struct ieee80211_cts *)skb->data;
memset(cts, 0, MAX_DUMMY_FRM_BODY);
cts->frame_control =
cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
cts->duration = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration);
memcpy(cts->ra, tx_ppdu_info->rx_status.addr1, sizeof(cts->ra));
skb_put(skb, sizeof(*cts));
tx_ppdu_info->tx_mon_mpdu->head = skb;
tx_ppdu_info->tx_mon_mpdu->tail = NULL;
list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
&tx_ppdu_info->dp_tx_mon_mpdu_list);
return 0;
}
static int
ath12k_dp_mon_tx_gen_rts_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
{
struct sk_buff *skb;
struct ieee80211_rts *rts;
skb = ath12k_dp_mon_tx_alloc_skb();
if (!skb)
return -ENOMEM;
rts = (struct ieee80211_rts *)skb->data;
memset(rts, 0, MAX_DUMMY_FRM_BODY);
rts->frame_control =
cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
rts->duration = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration);
memcpy(rts->ra, tx_ppdu_info->rx_status.addr1, sizeof(rts->ra));
memcpy(rts->ta, tx_ppdu_info->rx_status.addr2, sizeof(rts->ta));
skb_put(skb, sizeof(*rts));
tx_ppdu_info->tx_mon_mpdu->head = skb;
tx_ppdu_info->tx_mon_mpdu->tail = NULL;
list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
&tx_ppdu_info->dp_tx_mon_mpdu_list);
return 0;
}
static int
ath12k_dp_mon_tx_gen_3addr_qos_null_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
{
struct sk_buff *skb;
struct ieee80211_qos_hdr *qhdr;
skb = ath12k_dp_mon_tx_alloc_skb();
if (!skb)
return -ENOMEM;
qhdr = (struct ieee80211_qos_hdr *)skb->data;
memset(qhdr, 0, MAX_DUMMY_FRM_BODY);
qhdr->frame_control =
cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
qhdr->duration_id = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration);
memcpy(qhdr->addr1, tx_ppdu_info->rx_status.addr1, ETH_ALEN);
memcpy(qhdr->addr2, tx_ppdu_info->rx_status.addr2, ETH_ALEN);
memcpy(qhdr->addr3, tx_ppdu_info->rx_status.addr3, ETH_ALEN);
skb_put(skb, sizeof(*qhdr));
tx_ppdu_info->tx_mon_mpdu->head = skb;
tx_ppdu_info->tx_mon_mpdu->tail = NULL;
list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
&tx_ppdu_info->dp_tx_mon_mpdu_list);
return 0;
}
static int
ath12k_dp_mon_tx_gen_4addr_qos_null_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
{
struct sk_buff *skb;
struct dp_mon_qosframe_addr4 *qhdr;
skb = ath12k_dp_mon_tx_alloc_skb();
if (!skb)
return -ENOMEM;
qhdr = (struct dp_mon_qosframe_addr4 *)skb->data;
memset(qhdr, 0, MAX_DUMMY_FRM_BODY);
qhdr->frame_control =
cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
qhdr->duration = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration);
memcpy(qhdr->addr1, tx_ppdu_info->rx_status.addr1, ETH_ALEN);
memcpy(qhdr->addr2, tx_ppdu_info->rx_status.addr2, ETH_ALEN);
memcpy(qhdr->addr3, tx_ppdu_info->rx_status.addr3, ETH_ALEN);
memcpy(qhdr->addr4, tx_ppdu_info->rx_status.addr4, ETH_ALEN);
skb_put(skb, sizeof(*qhdr));
tx_ppdu_info->tx_mon_mpdu->head = skb;
tx_ppdu_info->tx_mon_mpdu->tail = NULL;
list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
&tx_ppdu_info->dp_tx_mon_mpdu_list);
return 0;
}
static int
ath12k_dp_mon_tx_gen_ack_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
{
struct sk_buff *skb;
struct dp_mon_frame_min_one *fbmhdr;
skb = ath12k_dp_mon_tx_alloc_skb();
if (!skb)
return -ENOMEM;
fbmhdr = (struct dp_mon_frame_min_one *)skb->data;
memset(fbmhdr, 0, MAX_DUMMY_FRM_BODY);
fbmhdr->frame_control =
cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_CFACK);
memcpy(fbmhdr->addr1, tx_ppdu_info->rx_status.addr1, ETH_ALEN);
/* set duration zero for ack frame */
fbmhdr->duration = 0;
skb_put(skb, sizeof(*fbmhdr));
tx_ppdu_info->tx_mon_mpdu->head = skb;
tx_ppdu_info->tx_mon_mpdu->tail = NULL;
list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
&tx_ppdu_info->dp_tx_mon_mpdu_list);
return 0;
}
static int
ath12k_dp_mon_tx_gen_prot_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
{
int ret = 0;
switch (tx_ppdu_info->rx_status.medium_prot_type) {
case DP_MON_TX_MEDIUM_RTS_LEGACY:
case DP_MON_TX_MEDIUM_RTS_11AC_STATIC_BW:
case DP_MON_TX_MEDIUM_RTS_11AC_DYNAMIC_BW:
ret = ath12k_dp_mon_tx_gen_rts_frame(tx_ppdu_info);
break;
case DP_MON_TX_MEDIUM_CTS2SELF:
ret = ath12k_dp_mon_tx_gen_cts2self_frame(tx_ppdu_info);
break;
case DP_MON_TX_MEDIUM_QOS_NULL_NO_ACK_3ADDR:
ret = ath12k_dp_mon_tx_gen_3addr_qos_null_frame(tx_ppdu_info);
break;
case DP_MON_TX_MEDIUM_QOS_NULL_NO_ACK_4ADDR:
ret = ath12k_dp_mon_tx_gen_4addr_qos_null_frame(tx_ppdu_info);
break;
}
return ret;
}
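/* Dispatch one tx monitor status TLV into the matching tx PPDU info
 * (protection or data, as selected by ath12k_dp_mon_hal_tx_ppdu_info).
 */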
static enum dp_mon_tx_tlv_status
ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
struct ath12k_mon_data *pmon,
u16 tlv_tag, u8 *tlv_data, u32 userid)
{
struct dp_mon_tx_ppdu_info *tx_ppdu_info;
enum dp_mon_tx_tlv_status status = DP_MON_TX_STATUS_PPDU_NOT_DONE;
u32 info[7];
tx_ppdu_info = ath12k_dp_mon_hal_tx_ppdu_info(pmon, tlv_tag);
switch (tlv_tag) {
case HAL_TX_FES_SETUP: {
struct hal_tx_fes_setup *tx_fes_setup =
(struct hal_tx_fes_setup *)tlv_data;
info[0] = __le32_to_cpu(tx_fes_setup->info0);
tx_ppdu_info->ppdu_id = __le32_to_cpu(tx_fes_setup->schedule_id);
tx_ppdu_info->num_users =
u32_get_bits(info[0], HAL_TX_FES_SETUP_INFO0_NUM_OF_USERS);
status = DP_MON_TX_FES_SETUP;
break;
}
case HAL_TX_FES_STATUS_END: {
struct hal_tx_fes_status_end *tx_fes_status_end =
(struct hal_tx_fes_status_end *)tlv_data;
u32 tst_15_0, tst_31_16;
info[0] = __le32_to_cpu(tx_fes_status_end->info0);
tst_15_0 =
u32_get_bits(info[0],
HAL_TX_FES_STATUS_END_INFO0_START_TIMESTAMP_15_0);
tst_31_16 =
u32_get_bits(info[0],
HAL_TX_FES_STATUS_END_INFO0_START_TIMESTAMP_31_16);
tx_ppdu_info->rx_status.ppdu_ts = (tst_15_0 | (tst_31_16 << 16));
status = DP_MON_TX_FES_STATUS_END;
break;
}
case HAL_RX_RESPONSE_REQUIRED_INFO: {
struct hal_rx_resp_req_info *rx_resp_req_info =
(struct hal_rx_resp_req_info *)tlv_data;
u32 addr_32;
u16 addr_16;
info[0] = __le32_to_cpu(rx_resp_req_info->info0);
info[1] = __le32_to_cpu(rx_resp_req_info->info1);
info[2] = __le32_to_cpu(rx_resp_req_info->info2);
info[3] = __le32_to_cpu(rx_resp_req_info->info3);
info[4] = __le32_to_cpu(rx_resp_req_info->info4);
info[5] = __le32_to_cpu(rx_resp_req_info->info5);
tx_ppdu_info->rx_status.ppdu_id =
u32_get_bits(info[0], HAL_RX_RESP_REQ_INFO0_PPDU_ID);
tx_ppdu_info->rx_status.reception_type =
u32_get_bits(info[0], HAL_RX_RESP_REQ_INFO0_RECEPTION_TYPE);
tx_ppdu_info->rx_status.rx_duration =
u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_DURATION);
tx_ppdu_info->rx_status.mcs =
u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_RATE_MCS);
tx_ppdu_info->rx_status.sgi =
u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_SGI);
tx_ppdu_info->rx_status.is_stbc =
u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_STBC);
tx_ppdu_info->rx_status.ldpc =
u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_LDPC);
tx_ppdu_info->rx_status.is_ampdu =
u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_IS_AMPDU);
tx_ppdu_info->rx_status.num_users =
u32_get_bits(info[2], HAL_RX_RESP_REQ_INFO2_NUM_USER);
addr_32 = u32_get_bits(info[3], HAL_RX_RESP_REQ_INFO3_ADDR1_31_0);
addr_16 = u32_get_bits(info[3], HAL_RX_RESP_REQ_INFO4_ADDR1_47_32);
ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr1);
addr_16 = u32_get_bits(info[4], HAL_RX_RESP_REQ_INFO4_ADDR1_15_0);
addr_32 = u32_get_bits(info[5], HAL_RX_RESP_REQ_INFO5_ADDR1_47_16);
ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr2);
if (tx_ppdu_info->rx_status.reception_type == 0)
ath12k_dp_mon_tx_gen_cts2self_frame(tx_ppdu_info);
status = DP_MON_RX_RESPONSE_REQUIRED_INFO;
break;
}
case HAL_PCU_PPDU_SETUP_INIT: {
struct hal_tx_pcu_ppdu_setup_init *ppdu_setup =
(struct hal_tx_pcu_ppdu_setup_init *)tlv_data;
u32 addr_32;
u16 addr_16;
info[0] = __le32_to_cpu(ppdu_setup->info0);
info[1] = __le32_to_cpu(ppdu_setup->info1);
info[2] = __le32_to_cpu(ppdu_setup->info2);
info[3] = __le32_to_cpu(ppdu_setup->info3);
info[4] = __le32_to_cpu(ppdu_setup->info4);
info[5] = __le32_to_cpu(ppdu_setup->info5);
info[6] = __le32_to_cpu(ppdu_setup->info6);
/* protection frame address 1 */
addr_32 = u32_get_bits(info[1],
HAL_TX_PPDU_SETUP_INFO1_PROT_FRAME_ADDR1_31_0);
addr_16 = u32_get_bits(info[2],
HAL_TX_PPDU_SETUP_INFO2_PROT_FRAME_ADDR1_47_32);
ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr1);
/* protection frame address 2 */
addr_16 = u32_get_bits(info[2],
HAL_TX_PPDU_SETUP_INFO2_PROT_FRAME_ADDR2_15_0);
addr_32 = u32_get_bits(info[3],
HAL_TX_PPDU_SETUP_INFO3_PROT_FRAME_ADDR2_47_16);
ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr2);
/* protection frame address 3 */
addr_32 = u32_get_bits(info[4],
HAL_TX_PPDU_SETUP_INFO4_PROT_FRAME_ADDR3_31_0);
addr_16 = u32_get_bits(info[5],
HAL_TX_PPDU_SETUP_INFO5_PROT_FRAME_ADDR3_47_32);
ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr3);
/* protection frame address 4 */
addr_16 = u32_get_bits(info[5],
HAL_TX_PPDU_SETUP_INFO5_PROT_FRAME_ADDR4_15_0);
addr_32 = u32_get_bits(info[6],
HAL_TX_PPDU_SETUP_INFO6_PROT_FRAME_ADDR4_47_16);
ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr4);
status = u32_get_bits(info[0],
HAL_TX_PPDU_SETUP_INFO0_MEDIUM_PROT_TYPE);
break;
}
case HAL_TX_QUEUE_EXTENSION: {
struct hal_tx_queue_exten *tx_q_exten =
(struct hal_tx_queue_exten *)tlv_data;
info[0] = __le32_to_cpu(tx_q_exten->info0);
tx_ppdu_info->rx_status.frame_control =
u32_get_bits(info[0],
HAL_TX_Q_EXT_INFO0_FRAME_CTRL);
tx_ppdu_info->rx_status.fc_valid = true;
break;
}
case HAL_TX_FES_STATUS_START: {
struct hal_tx_fes_status_start *tx_fes_start =
(struct hal_tx_fes_status_start *)tlv_data;
info[0] = __le32_to_cpu(tx_fes_start->info0);
tx_ppdu_info->rx_status.medium_prot_type =
u32_get_bits(info[0],
HAL_TX_FES_STATUS_START_INFO0_MEDIUM_PROT_TYPE);
break;
}
case HAL_TX_FES_STATUS_PROT: {
struct hal_tx_fes_status_prot *tx_fes_status =
(struct hal_tx_fes_status_prot *)tlv_data;
u32 start_timestamp;
u32 end_timestamp;
info[0] = __le32_to_cpu(tx_fes_status->info0);
info[1] = __le32_to_cpu(tx_fes_status->info1);
start_timestamp =
u32_get_bits(info[0],
HAL_TX_FES_STAT_PROT_INFO0_STRT_FRM_TS_15_0);
start_timestamp |=
u32_get_bits(info[0],
HAL_TX_FES_STAT_PROT_INFO0_STRT_FRM_TS_31_16) << 16;
end_timestamp =
u32_get_bits(info[1],
HAL_TX_FES_STAT_PROT_INFO1_END_FRM_TS_15_0);
end_timestamp |=
u32_get_bits(info[1],
HAL_TX_FES_STAT_PROT_INFO1_END_FRM_TS_31_16) << 16;
tx_ppdu_info->rx_status.rx_duration = end_timestamp - start_timestamp;
ath12k_dp_mon_tx_gen_prot_frame(tx_ppdu_info);
break;
}
case HAL_TX_FES_STATUS_START_PPDU:
case HAL_TX_FES_STATUS_START_PROT: {
struct hal_tx_fes_status_start_prot *tx_fes_stat_start =
(struct hal_tx_fes_status_start_prot *)tlv_data;
u64 ppdu_ts;
info[0] = __le32_to_cpu(tx_fes_stat_start->info0);
/* also load info1 (assumed to follow info0, as the INFO1 field mask
 * implies) so the upper timestamp half below reads fresh data
 */
info[1] = __le32_to_cpu(tx_fes_stat_start->info1);
tx_ppdu_info->rx_status.ppdu_ts =
u32_get_bits(info[0],
HAL_TX_FES_STAT_STRT_INFO0_PROT_TS_LOWER_32);
ppdu_ts = u32_get_bits(info[1],
HAL_TX_FES_STAT_STRT_INFO1_PROT_TS_UPPER_32);
tx_ppdu_info->rx_status.ppdu_ts |= ppdu_ts << 32;
break;
}
case HAL_TX_FES_STATUS_USER_PPDU: {
struct hal_tx_fes_status_user_ppdu *tx_fes_usr_ppdu =
(struct hal_tx_fes_status_user_ppdu *)tlv_data;
info[0] = __le32_to_cpu(tx_fes_usr_ppdu->info0);
tx_ppdu_info->rx_status.rx_duration =
u32_get_bits(info[0],
HAL_TX_FES_STAT_USR_PPDU_INFO0_DURATION);
break;
}
case HAL_MACTX_HE_SIG_A_SU:
ath12k_dp_mon_parse_he_sig_su(tlv_data, &tx_ppdu_info->rx_status);
break;
case HAL_MACTX_HE_SIG_A_MU_DL:
ath12k_dp_mon_parse_he_sig_mu(tlv_data, &tx_ppdu_info->rx_status);
break;
case HAL_MACTX_HE_SIG_B1_MU:
ath12k_dp_mon_parse_he_sig_b1_mu(tlv_data, &tx_ppdu_info->rx_status);
break;
case HAL_MACTX_HE_SIG_B2_MU:
ath12k_dp_mon_parse_he_sig_b2_mu(tlv_data, &tx_ppdu_info->rx_status);
break;
case HAL_MACTX_HE_SIG_B2_OFDMA:
ath12k_dp_mon_parse_he_sig_b2_ofdma(tlv_data, &tx_ppdu_info->rx_status);
break;
case HAL_MACTX_VHT_SIG_A:
ath12k_dp_mon_parse_vht_sig_a(tlv_data, &tx_ppdu_info->rx_status);
break;
case HAL_MACTX_L_SIG_A:
ath12k_dp_mon_parse_l_sig_a(tlv_data, &tx_ppdu_info->rx_status);
break;
case HAL_MACTX_L_SIG_B:
ath12k_dp_mon_parse_l_sig_b(tlv_data, &tx_ppdu_info->rx_status);
break;
case HAL_RX_FRAME_BITMAP_ACK: {
struct hal_rx_frame_bitmap_ack *fbm_ack =
(struct hal_rx_frame_bitmap_ack *)tlv_data;
u32 addr_32;
u16 addr_16;
info[0] = __le32_to_cpu(fbm_ack->info0);
info[1] = __le32_to_cpu(fbm_ack->info1);
addr_32 = u32_get_bits(info[0],
HAL_RX_FBM_ACK_INFO0_ADDR1_31_0);
addr_16 = u32_get_bits(info[1],
HAL_RX_FBM_ACK_INFO1_ADDR1_47_32);
ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr1);
ath12k_dp_mon_tx_gen_ack_frame(tx_ppdu_info);
break;
}
case HAL_MACTX_PHY_DESC: {
struct hal_tx_phy_desc *tx_phy_desc =
(struct hal_tx_phy_desc *)tlv_data;
info[0] = __le32_to_cpu(tx_phy_desc->info0);
info[1] = __le32_to_cpu(tx_phy_desc->info1);
info[2] = __le32_to_cpu(tx_phy_desc->info2);
info[3] = __le32_to_cpu(tx_phy_desc->info3);
tx_ppdu_info->rx_status.beamformed =
u32_get_bits(info[0],
HAL_TX_PHY_DESC_INFO0_BF_TYPE);
tx_ppdu_info->rx_status.preamble_type =
u32_get_bits(info[0],
HAL_TX_PHY_DESC_INFO0_PREAMBLE_11B);
tx_ppdu_info->rx_status.mcs =
u32_get_bits(info[1],
HAL_TX_PHY_DESC_INFO1_MCS);
tx_ppdu_info->rx_status.ltf_size =
u32_get_bits(info[3],
HAL_TX_PHY_DESC_INFO3_LTF_SIZE);
tx_ppdu_info->rx_status.nss =
u32_get_bits(info[2],
HAL_TX_PHY_DESC_INFO2_NSS);
tx_ppdu_info->rx_status.chan_num =
u32_get_bits(info[3],
HAL_TX_PHY_DESC_INFO3_ACTIVE_CHANNEL);
tx_ppdu_info->rx_status.bw =
u32_get_bits(info[0],
HAL_TX_PHY_DESC_INFO0_BANDWIDTH);
break;
}
case HAL_TX_MPDU_START: {
struct dp_mon_mpdu *mon_mpdu;
mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
if (!mon_mpdu)
return DP_MON_TX_STATUS_PPDU_NOT_DONE;
/* store the new mpdu so later HAL_MON_BUF_ADDR TLVs chain msdus onto it */
tx_ppdu_info->tx_mon_mpdu = mon_mpdu;
status = DP_MON_TX_MPDU_START;
break;
}
case HAL_MON_BUF_ADDR: {
struct dp_rxdma_ring *buf_ring = &ab->dp.tx_mon_buf_ring;
struct dp_mon_packet_info *packet_info =
(struct dp_mon_packet_info *)tlv_data;
int buf_id = u32_get_bits(packet_info->cookie,
DP_RXDMA_BUF_COOKIE_BUF_ID);
struct sk_buff *msdu;
struct dp_mon_mpdu *mon_mpdu = tx_ppdu_info->tx_mon_mpdu;
struct ath12k_skb_rxcb *rxcb;
spin_lock_bh(&buf_ring->idr_lock);
msdu = idr_remove(&buf_ring->bufs_idr, buf_id);
spin_unlock_bh(&buf_ring->idr_lock);
if (unlikely(!msdu)) {
ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
buf_id);
return DP_MON_TX_STATUS_PPDU_NOT_DONE;
}
rxcb = ATH12K_SKB_RXCB(msdu);
dma_unmap_single(ab->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
if (!mon_mpdu->head)
mon_mpdu->head = msdu;
else if (mon_mpdu->tail)
mon_mpdu->tail->next = msdu;
mon_mpdu->tail = msdu;
ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
status = DP_MON_TX_BUFFER_ADDR;
break;
}
case HAL_TX_MPDU_END:
list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
&tx_ppdu_info->dp_tx_mon_mpdu_list);
break;
}
return status;
}
enum dp_mon_tx_tlv_status
ath12k_dp_mon_tx_status_get_num_user(u16 tlv_tag,
struct hal_tlv_hdr *tx_tlv,
u8 *num_users)
{
u32 tlv_status = DP_MON_TX_STATUS_PPDU_NOT_DONE;
u32 info0;
switch (tlv_tag) {
case HAL_TX_FES_SETUP: {
struct hal_tx_fes_setup *tx_fes_setup =
(struct hal_tx_fes_setup *)tx_tlv;
info0 = __le32_to_cpu(tx_fes_setup->info0);
*num_users = u32_get_bits(info0, HAL_TX_FES_SETUP_INFO0_NUM_OF_USERS);
tlv_status = DP_MON_TX_FES_SETUP;
break;
}
case HAL_RX_RESPONSE_REQUIRED_INFO: {
/* TODO: need to update *num_users */
tlv_status = DP_MON_RX_RESPONSE_REQUIRED_INFO;
break;
}
}
return tlv_status;
}
static void
ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, int mac_id,
struct napi_struct *napi,
struct dp_mon_tx_ppdu_info *tx_ppdu_info)
{
struct dp_mon_mpdu *tmp, *mon_mpdu;
struct sk_buff *head_msdu;
list_for_each_entry_safe(mon_mpdu, tmp,
&tx_ppdu_info->dp_tx_mon_mpdu_list, list) {
list_del(&mon_mpdu->list);
head_msdu = mon_mpdu->head;
if (head_msdu)
ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu,
&tx_ppdu_info->rx_status, napi);
kfree(mon_mpdu);
}
}
enum hal_rx_mon_status
ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar,
struct ath12k_mon_data *pmon,
int mac_id,
struct sk_buff *skb,
struct napi_struct *napi,
u32 ppdu_id)
{
struct ath12k_base *ab = ar->ab;
struct dp_mon_tx_ppdu_info *tx_prot_ppdu_info, *tx_data_ppdu_info;
struct hal_tlv_hdr *tlv;
u8 *ptr = skb->data;
u16 tlv_tag;
u16 tlv_len;
u32 tlv_userid = 0;
u8 num_user;
u32 tlv_status = DP_MON_TX_STATUS_PPDU_NOT_DONE;
tx_prot_ppdu_info = ath12k_dp_mon_tx_get_ppdu_info(pmon, ppdu_id,
DP_MON_TX_PROT_PPDU_INFO);
if (!tx_prot_ppdu_info)
return -ENOMEM;
tlv = (struct hal_tlv_hdr *)ptr;
tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG);
tlv_status = ath12k_dp_mon_tx_status_get_num_user(tlv_tag, tlv, &num_user);
if (tlv_status == DP_MON_TX_STATUS_PPDU_NOT_DONE || !num_user)
return -EINVAL;
tx_data_ppdu_info = ath12k_dp_mon_tx_get_ppdu_info(pmon, ppdu_id,
DP_MON_TX_DATA_PPDU_INFO);
if (!tx_data_ppdu_info)
return -ENOMEM;
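/* Walk the TLV stream: parse each status TLV, then step over its
 * aligned payload until the FES status end TLV is seen or the monitor
 * buffer is exhausted.
 */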
do {
tlv = (struct hal_tlv_hdr *)ptr;
tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG);
tlv_len = le32_get_bits(tlv->tl, HAL_TLV_HDR_LEN);
tlv_userid = le32_get_bits(tlv->tl, HAL_TLV_USR_ID);
tlv_status = ath12k_dp_mon_tx_parse_status_tlv(ab, pmon,
tlv_tag, ptr,
tlv_userid);
ptr += tlv_len;
ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
if ((ptr - skb->data) >= DP_TX_MONITOR_BUF_SIZE)
break;
} while (tlv_status != DP_MON_TX_FES_STATUS_END);
ath12k_dp_mon_tx_process_ppdu_info(ar, mac_id, napi, tx_data_ppdu_info);
ath12k_dp_mon_tx_process_ppdu_info(ar, mac_id, napi, tx_prot_ppdu_info);
return tlv_status;
}
int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget,
enum dp_monitor_mode monitor_mode,
struct napi_struct *napi)
{
struct hal_mon_dest_desc *mon_dst_desc;
struct ath12k_pdev_dp *pdev_dp = &ar->dp;
struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&pdev_dp->mon_data;
struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = &ab->dp;
struct sk_buff *skb;
struct ath12k_skb_rxcb *rxcb;
struct dp_srng *mon_dst_ring;
struct hal_srng *srng;
struct dp_rxdma_ring *buf_ring;
u64 cookie;
u32 ppdu_id;
int num_buffs_reaped = 0, srng_id, buf_id;
u8 dest_idx = 0, i;
bool end_of_ppdu;
struct hal_rx_mon_ppdu_info *ppdu_info;
struct ath12k_peer *peer = NULL;
ppdu_info = &pmon->mon_ppdu_info;
memset(ppdu_info, 0, sizeof(*ppdu_info));
ppdu_info->peer_id = HAL_INVALID_PEERID;
srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE) {
mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
buf_ring = &dp->rxdma_mon_buf_ring;
} else {
mon_dst_ring = &pdev_dp->tx_mon_dst_ring[srng_id];
buf_ring = &dp->tx_mon_buf_ring;
}
srng = &ab->hal.srng_list[mon_dst_ring->ring_id];
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
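/* Reap entries from the monitor destination ring, queueing skbs until
 * the end-of-PPDU marker, then parse the queued buffers as one PPDU.
 */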
while (likely(*budget)) {
*budget -= 1;
mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng);
if (unlikely(!mon_dst_desc))
break;
cookie = le32_to_cpu(mon_dst_desc->cookie);
buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
spin_lock_bh(&buf_ring->idr_lock);
skb = idr_remove(&buf_ring->bufs_idr, buf_id);
spin_unlock_bh(&buf_ring->idr_lock);
if (unlikely(!skb)) {
ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
buf_id);
goto move_next;
}
rxcb = ATH12K_SKB_RXCB(skb);
dma_unmap_single(ab->dev, rxcb->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
pmon->dest_skb_q[dest_idx] = skb;
dest_idx++;
ppdu_id = le32_to_cpu(mon_dst_desc->ppdu_id);
end_of_ppdu = le32_get_bits(mon_dst_desc->info0,
HAL_MON_DEST_INFO0_END_OF_PPDU);
if (!end_of_ppdu)
continue;
for (i = 0; i < dest_idx; i++) {
skb = pmon->dest_skb_q[i];
if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE)
ath12k_dp_mon_rx_parse_mon_status(ar, pmon, mac_id,
skb, napi);
else
ath12k_dp_mon_tx_parse_mon_status(ar, pmon, mac_id,
skb, napi, ppdu_id);
peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
if (!peer || !peer->sta) {
ath12k_dbg(ab, ATH12K_DBG_DATA,
"failed to find the peer with peer_id %d\n",
ppdu_info->peer_id);
dev_kfree_skb_any(skb);
continue;
}
dev_kfree_skb_any(skb);
pmon->dest_skb_q[i] = NULL;
}
dest_idx = 0;
move_next:
ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
ath12k_hal_srng_src_get_next_entry(ab, srng);
num_buffs_reaped++;
}
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return num_buffs_reaped;
}
static void
ath12k_dp_mon_rx_update_peer_rate_table_stats(struct ath12k_rx_peer_stats *rx_stats,
struct hal_rx_mon_ppdu_info *ppdu_info,
struct hal_rx_user_status *user_stats,
u32 num_msdu)
{
u32 rate_idx = 0;
u32 mcs_idx = (user_stats) ? user_stats->mcs : ppdu_info->mcs;
u32 nss_idx = (user_stats) ? user_stats->nss - 1 : ppdu_info->nss - 1;
u32 bw_idx = ppdu_info->bw;
u32 gi_idx = ppdu_info->gi;
if ((mcs_idx > HAL_RX_MAX_MCS_HE) || (nss_idx >= HAL_RX_MAX_NSS) ||
(bw_idx >= HAL_RX_BW_MAX) || (gi_idx >= HAL_RX_GI_MAX)) {
return;
}
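/* Rate table layout, as implied by the index arithmetic below: one
 * block per NSS; for HT/VHT each block holds 10 MCS entries of 8 slots
 * (4 BW x 2 GI), for HE each block holds 12 MCS entries of 12 slots
 * (4 BW x 3 GI).
 */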
if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11N ||
ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AC) {
rate_idx = mcs_idx * 8 + 8 * 10 * nss_idx;
rate_idx += bw_idx * 2 + gi_idx;
} else if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX) {
gi_idx = ath12k_he_gi_to_nl80211_he_gi(ppdu_info->gi);
rate_idx = mcs_idx * 12 + 12 * 12 * nss_idx;
rate_idx += bw_idx * 3 + gi_idx;
} else {
return;
}
rx_stats->pkt_stats.rx_rate[rate_idx] += num_msdu;
if (user_stats)
rx_stats->byte_stats.rx_rate[rate_idx] += user_stats->mpdu_ok_byte_count;
else
rx_stats->byte_stats.rx_rate[rate_idx] += ppdu_info->mpdu_len;
}
static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
struct ath12k_sta *arsta,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
struct ath12k_rx_peer_stats *rx_stats = arsta->rx_stats;
u32 num_msdu;
if (!rx_stats)
return;
arsta->rssi_comb = ppdu_info->rssi_comb;
num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
rx_stats->num_msdu += num_msdu;
rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
ppdu_info->tcp_ack_msdu_count;
rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
ppdu_info->nss = 1;
ppdu_info->mcs = HAL_RX_MAX_MCS;
ppdu_info->tid = IEEE80211_NUM_TIDS;
}
if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
rx_stats->tid_count[ppdu_info->tid] += num_msdu;
if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
if (ppdu_info->is_stbc)
rx_stats->stbc_count += num_msdu;
if (ppdu_info->beamformed)
rx_stats->beamformed_count += num_msdu;
if (ppdu_info->num_mpdu_fcs_ok > 1)
rx_stats->ampdu_msdu_count += num_msdu;
else
rx_stats->non_ampdu_msdu_count += num_msdu;
rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
rx_stats->dcm_count += ppdu_info->dcm;
rx_stats->rx_duration += ppdu_info->rx_duration;
arsta->rx_duration = rx_stats->rx_duration;
if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS) {
rx_stats->pkt_stats.nss_count[ppdu_info->nss - 1] += num_msdu;
rx_stats->byte_stats.nss_count[ppdu_info->nss - 1] += ppdu_info->mpdu_len;
}
if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11N &&
ppdu_info->mcs <= HAL_RX_MAX_MCS_HT) {
rx_stats->pkt_stats.ht_mcs_count[ppdu_info->mcs] += num_msdu;
rx_stats->byte_stats.ht_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
/* To fit into the rate table for HT packets */
ppdu_info->mcs = ppdu_info->mcs % 8;
}
if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AC &&
ppdu_info->mcs <= HAL_RX_MAX_MCS_VHT) {
rx_stats->pkt_stats.vht_mcs_count[ppdu_info->mcs] += num_msdu;
rx_stats->byte_stats.vht_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
}
if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX &&
ppdu_info->mcs <= HAL_RX_MAX_MCS_HE) {
rx_stats->pkt_stats.he_mcs_count[ppdu_info->mcs] += num_msdu;
rx_stats->byte_stats.he_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
}
if ((ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) &&
ppdu_info->rate < HAL_RX_LEGACY_RATE_INVALID) {
rx_stats->pkt_stats.legacy_count[ppdu_info->rate] += num_msdu;
rx_stats->byte_stats.legacy_count[ppdu_info->rate] += ppdu_info->mpdu_len;
}
if (ppdu_info->gi < HAL_RX_GI_MAX) {
rx_stats->pkt_stats.gi_count[ppdu_info->gi] += num_msdu;
rx_stats->byte_stats.gi_count[ppdu_info->gi] += ppdu_info->mpdu_len;
}
if (ppdu_info->bw < HAL_RX_BW_MAX) {
rx_stats->pkt_stats.bw_count[ppdu_info->bw] += num_msdu;
rx_stats->byte_stats.bw_count[ppdu_info->bw] += ppdu_info->mpdu_len;
}
ath12k_dp_mon_rx_update_peer_rate_table_stats(rx_stats, ppdu_info,
NULL, num_msdu);
}
void ath12k_dp_mon_rx_process_ulofdma(struct hal_rx_mon_ppdu_info *ppdu_info)
{
struct hal_rx_user_status *rx_user_status;
u32 num_users, i, mu_ul_user_v0_word0, mu_ul_user_v0_word1, ru_size;
if (!(ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_MIMO ||
ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA ||
ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO))
return;
num_users = ppdu_info->num_users;
if (num_users > HAL_MAX_UL_MU_USERS)
num_users = HAL_MAX_UL_MU_USERS;
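/* For UL MU receptions, extract the per-user MCS/NSS/RU allocation
 * from the version-0 UL OFDMA user info words.
 */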
for (i = 0; i < num_users; i++) {
rx_user_status = &ppdu_info->userstats[i];
mu_ul_user_v0_word0 =
rx_user_status->ul_ofdma_user_v0_word0;
mu_ul_user_v0_word1 =
rx_user_status->ul_ofdma_user_v0_word1;
if (u32_get_bits(mu_ul_user_v0_word0,
HAL_RX_UL_OFDMA_USER_INFO_V0_W0_VALID) &&
!u32_get_bits(mu_ul_user_v0_word0,
HAL_RX_UL_OFDMA_USER_INFO_V0_W0_VER)) {
rx_user_status->mcs =
u32_get_bits(mu_ul_user_v0_word1,
HAL_RX_UL_OFDMA_USER_INFO_V0_W1_MCS);
rx_user_status->nss =
u32_get_bits(mu_ul_user_v0_word1,
HAL_RX_UL_OFDMA_USER_INFO_V0_W1_NSS) + 1;
rx_user_status->ofdma_info_valid = 1;
rx_user_status->ul_ofdma_ru_start_index =
u32_get_bits(mu_ul_user_v0_word1,
HAL_RX_UL_OFDMA_USER_INFO_V0_W1_RU_START);
ru_size = u32_get_bits(mu_ul_user_v0_word1,
HAL_RX_UL_OFDMA_USER_INFO_V0_W1_RU_SIZE);
rx_user_status->ul_ofdma_ru_width = ru_size;
rx_user_status->ul_ofdma_ru_size = ru_size;
}
rx_user_status->ldpc = u32_get_bits(mu_ul_user_v0_word1,
HAL_RX_UL_OFDMA_USER_INFO_V0_W1_LDPC);
}
ppdu_info->ldpc = 1;
}
static void
ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar,
struct hal_rx_mon_ppdu_info *ppdu_info,
u32 uid)
{
struct ath12k_sta *arsta = NULL;
struct ath12k_rx_peer_stats *rx_stats = NULL;
struct hal_rx_user_status *user_stats = &ppdu_info->userstats[uid];
struct ath12k_peer *peer;
u32 num_msdu;
if (user_stats->ast_index == 0 || user_stats->ast_index == 0xFFFF)
return;
peer = ath12k_peer_find_by_ast(ar->ab, user_stats->ast_index);
if (!peer) {
ath12k_warn(ar->ab, "peer ast idx %d can't be found\n",
user_stats->ast_index);
return;
}
arsta = (struct ath12k_sta *)peer->sta->drv_priv;
rx_stats = arsta->rx_stats;
if (!rx_stats)
return;
arsta->rssi_comb = ppdu_info->rssi_comb;
num_msdu = user_stats->tcp_msdu_count + user_stats->tcp_ack_msdu_count +
user_stats->udp_msdu_count + user_stats->other_msdu_count;
rx_stats->num_msdu += num_msdu;
rx_stats->tcp_msdu_count += user_stats->tcp_msdu_count +
user_stats->tcp_ack_msdu_count;
rx_stats->udp_msdu_count += user_stats->udp_msdu_count;
rx_stats->other_msdu_count += user_stats->other_msdu_count;
if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
if (user_stats->tid <= IEEE80211_NUM_TIDS)
rx_stats->tid_count[user_stats->tid] += num_msdu;
if (user_stats->preamble_type < HAL_RX_PREAMBLE_MAX)
rx_stats->pream_cnt[user_stats->preamble_type] += num_msdu;
if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
if (ppdu_info->is_stbc)
rx_stats->stbc_count += num_msdu;
if (ppdu_info->beamformed)
rx_stats->beamformed_count += num_msdu;
if (user_stats->mpdu_cnt_fcs_ok > 1)
rx_stats->ampdu_msdu_count += num_msdu;
else
rx_stats->non_ampdu_msdu_count += num_msdu;
rx_stats->num_mpdu_fcs_ok += user_stats->mpdu_cnt_fcs_ok;
rx_stats->num_mpdu_fcs_err += user_stats->mpdu_cnt_fcs_err;
rx_stats->dcm_count += ppdu_info->dcm;
if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA ||
ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO)
rx_stats->ru_alloc_cnt[user_stats->ul_ofdma_ru_size] += num_msdu;
rx_stats->rx_duration += ppdu_info->rx_duration;
arsta->rx_duration = rx_stats->rx_duration;
if (user_stats->nss > 0 && user_stats->nss <= HAL_RX_MAX_NSS) {
rx_stats->pkt_stats.nss_count[user_stats->nss - 1] += num_msdu;
rx_stats->byte_stats.nss_count[user_stats->nss - 1] +=
user_stats->mpdu_ok_byte_count;
}
if (user_stats->preamble_type == HAL_RX_PREAMBLE_11AX &&
user_stats->mcs <= HAL_RX_MAX_MCS_HE) {
rx_stats->pkt_stats.he_mcs_count[user_stats->mcs] += num_msdu;
rx_stats->byte_stats.he_mcs_count[user_stats->mcs] +=
user_stats->mpdu_ok_byte_count;
}
if (ppdu_info->gi < HAL_RX_GI_MAX) {
rx_stats->pkt_stats.gi_count[ppdu_info->gi] += num_msdu;
rx_stats->byte_stats.gi_count[ppdu_info->gi] +=
user_stats->mpdu_ok_byte_count;
}
if (ppdu_info->bw < HAL_RX_BW_MAX) {
rx_stats->pkt_stats.bw_count[ppdu_info->bw] += num_msdu;
rx_stats->byte_stats.bw_count[ppdu_info->bw] +=
user_stats->mpdu_ok_byte_count;
}
ath12k_dp_mon_rx_update_peer_rate_table_stats(rx_stats, ppdu_info,
user_stats, num_msdu);
}
static void
ath12k_dp_mon_rx_update_peer_mu_stats(struct ath12k *ar,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
u32 num_users, i;
num_users = ppdu_info->num_users;
if (num_users > HAL_MAX_UL_MU_USERS)
num_users = HAL_MAX_UL_MU_USERS;
for (i = 0; i < num_users; i++)
ath12k_dp_mon_rx_update_user_stats(ar, ppdu_info, i);
}
int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
struct napi_struct *napi, int *budget)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_pdev_dp *pdev_dp = &ar->dp;
struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&pdev_dp->mon_data;
struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
struct ath12k_dp *dp = &ab->dp;
struct hal_mon_dest_desc *mon_dst_desc;
struct sk_buff *skb;
struct ath12k_skb_rxcb *rxcb;
struct dp_srng *mon_dst_ring;
struct hal_srng *srng;
struct dp_rxdma_ring *buf_ring;
struct ath12k_sta *arsta = NULL;
struct ath12k_peer *peer;
u64 cookie;
int num_buffs_reaped = 0, srng_id, buf_id;
u8 dest_idx = 0, i;
bool end_of_ppdu;
u32 hal_status;
srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
buf_ring = &dp->rxdma_mon_buf_ring;
srng = &ab->hal.srng_list[mon_dst_ring->ring_id];
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
while (likely(*budget)) {
*budget -= 1;
mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng);
if (unlikely(!mon_dst_desc))
break;
cookie = le32_to_cpu(mon_dst_desc->cookie);
buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
spin_lock_bh(&buf_ring->idr_lock);
skb = idr_remove(&buf_ring->bufs_idr, buf_id);
spin_unlock_bh(&buf_ring->idr_lock);
if (unlikely(!skb)) {
ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
buf_id);
goto move_next;
}
rxcb = ATH12K_SKB_RXCB(skb);
dma_unmap_single(ab->dev, rxcb->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
pmon->dest_skb_q[dest_idx] = skb;
dest_idx++;
end_of_ppdu = le32_get_bits(mon_dst_desc->info0,
HAL_MON_DEST_INFO0_END_OF_PPDU);
if (!end_of_ppdu)
continue;
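/* A complete PPDU has been queued; parse each status buffer and
 * update the corresponding peer's statistics.
 */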
for (i = 0; i < dest_idx; i++) {
skb = pmon->dest_skb_q[i];
hal_status = ath12k_dp_mon_parse_rx_dest(ab, pmon, skb);
if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
dev_kfree_skb_any(skb);
continue;
}
rcu_read_lock();
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
if (!peer || !peer->sta) {
ath12k_dbg(ab, ATH12K_DBG_DATA,
"failed to find the peer with peer_id %d\n",
ppdu_info->peer_id);
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
dev_kfree_skb_any(skb);
continue;
}
if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) {
arsta = (struct ath12k_sta *)peer->sta->drv_priv;
ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta,
ppdu_info);
} else if ((ppdu_info->fc_valid) &&
(ppdu_info->ast_index != HAL_AST_IDX_INVALID)) {
ath12k_dp_mon_rx_process_ulofdma(ppdu_info);
ath12k_dp_mon_rx_update_peer_mu_stats(ar, ppdu_info);
}
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
dev_kfree_skb_any(skb);
memset(ppdu_info, 0, sizeof(*ppdu_info));
ppdu_info->peer_id = HAL_INVALID_PEERID;
}
dest_idx = 0;
move_next:
ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
ath12k_hal_srng_src_get_next_entry(ab, srng);
num_buffs_reaped++;
}
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return num_buffs_reaped;
}
int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
struct napi_struct *napi, int budget,
enum dp_monitor_mode monitor_mode)
{
struct ath12k *ar = ath12k_ab_to_ar(ab, mac_id);
int num_buffs_reaped = 0;
if (!ar->monitor_started)
num_buffs_reaped = ath12k_dp_mon_rx_process_stats(ar, mac_id, napi, &budget);
else
num_buffs_reaped = ath12k_dp_mon_srng_process(ar, mac_id, &budget,
monitor_mode, napi);
return num_buffs_reaped;
}
|
linux-master
|
drivers/net/wireless/ath/ath12k/dp_mon.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/rtnetlink.h>
#include "core.h"
#include "debug.h"
/* World regdom to be used in case default regd from fw is unavailable */
#define ATH12K_2GHZ_CH01_11 REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0)
#define ATH12K_5GHZ_5150_5350 REG_RULE(5150 - 10, 5350 + 10, 80, 0, 30,\
NL80211_RRF_NO_IR)
#define ATH12K_5GHZ_5725_5850 REG_RULE(5725 - 10, 5850 + 10, 80, 0, 30,\
NL80211_RRF_NO_IR)
#define ETSI_WEATHER_RADAR_BAND_LOW 5590
#define ETSI_WEATHER_RADAR_BAND_HIGH 5650
#define ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT 600000
static const struct ieee80211_regdomain ath12k_world_regd = {
.n_reg_rules = 3,
.alpha2 = "00",
.reg_rules = {
ATH12K_2GHZ_CH01_11,
ATH12K_5GHZ_5150_5350,
ATH12K_5GHZ_5725_5850,
}
};
static bool ath12k_regdom_changes(struct ath12k *ar, char *alpha2)
{
const struct ieee80211_regdomain *regd;
regd = rcu_dereference_rtnl(ar->hw->wiphy->regd);
/* This can happen during wiphy registration where the previous
* user request is received before we update the regd received
* from firmware.
*/
if (!regd)
return true;
return memcmp(regd->alpha2, alpha2, 2) != 0;
}
static void
ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath12k_wmi_init_country_arg arg;
struct ath12k *ar = hw->priv;
int ret;
ath12k_dbg(ar->ab, ATH12K_DBG_REG,
"Regulatory Notification received for %s\n", wiphy_name(wiphy));
/* Currently supporting only General User Hints; cell-based user
 * hints are to be handled later.
 * Hints from other sources like Core and Beacons are not expected for
 * self-managed wiphys.
 */
if (!(request->initiator == NL80211_REGDOM_SET_BY_USER &&
request->user_reg_hint_type == NL80211_USER_REG_HINT_USER)) {
ath12k_warn(ar->ab, "Unexpected Regulatory event for this wiphy\n");
return;
}
if (!IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS)) {
ath12k_dbg(ar->ab, ATH12K_DBG_REG,
"Country Setting is not allowed\n");
return;
}
if (!ath12k_regdom_changes(ar, request->alpha2)) {
ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Country is already set\n");
return;
}
/* Set the country code to the firmware and wait for
* the WMI_REG_CHAN_LIST_CC EVENT for updating the
* reg info
*/
arg.flags = ALPHA_IS_SET;
memcpy(&arg.cc_info.alpha2, request->alpha2, 2);
arg.cc_info.alpha2[2] = 0;
ret = ath12k_wmi_send_init_country_cmd(ar, &arg);
if (ret)
ath12k_warn(ar->ab,
"INIT Country code set to fw failed : %d\n", ret);
}
int ath12k_reg_update_chan_list(struct ath12k *ar)
{
struct ieee80211_supported_band **bands;
struct ath12k_wmi_scan_chan_list_arg *arg;
struct ieee80211_channel *channel;
struct ieee80211_hw *hw = ar->hw;
struct ath12k_wmi_channel_arg *ch;
enum nl80211_band band;
int num_channels = 0;
int i, ret;
bands = hw->wiphy->bands;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!bands[band])
continue;
for (i = 0; i < bands[band]->n_channels; i++) {
if (bands[band]->channels[i].flags &
IEEE80211_CHAN_DISABLED)
continue;
num_channels++;
}
}
if (WARN_ON(!num_channels))
return -EINVAL;
arg = kzalloc(struct_size(arg, channel, num_channels), GFP_KERNEL);
if (!arg)
return -ENOMEM;
arg->pdev_id = ar->pdev->pdev_id;
arg->nallchans = num_channels;
ch = arg->channel;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!bands[band])
continue;
for (i = 0; i < bands[band]->n_channels; i++) {
channel = &bands[band]->channels[i];
if (channel->flags & IEEE80211_CHAN_DISABLED)
continue;
/* TODO: Set to true/false based on some condition? */
ch->allow_ht = true;
ch->allow_vht = true;
ch->allow_he = true;
ch->dfs_set =
!!(channel->flags & IEEE80211_CHAN_RADAR);
ch->is_chan_passive = !!(channel->flags &
IEEE80211_CHAN_NO_IR);
ch->is_chan_passive |= ch->dfs_set;
ch->mhz = channel->center_freq;
ch->cfreq1 = channel->center_freq;
ch->minpower = 0;
ch->maxpower = channel->max_power * 2;
ch->maxregpower = channel->max_reg_power * 2;
ch->antennamax = channel->max_antenna_gain * 2;
/* TODO: Use appropriate phymodes */
if (channel->band == NL80211_BAND_2GHZ)
ch->phy_mode = MODE_11G;
else
ch->phy_mode = MODE_11A;
if (channel->band == NL80211_BAND_6GHZ &&
cfg80211_channel_is_psc(channel))
ch->psc_channel = true;
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"mac channel [%d/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
i, arg->nallchans,
ch->mhz, ch->maxpower, ch->maxregpower,
ch->antennamax, ch->phy_mode);
ch++;
/* TODO: use quarter/half rate, cfreq12, dfs_cfreq2,
 * set_agile, reg_class_idx
 */
}
}
ret = ath12k_wmi_send_scan_chan_list_cmd(ar, arg);
kfree(arg);
return ret;
}
static void ath12k_copy_regd(struct ieee80211_regdomain *regd_orig,
struct ieee80211_regdomain *regd_copy)
{
u8 i;
/* The caller should have checked error conditions */
memcpy(regd_copy, regd_orig, sizeof(*regd_orig));
for (i = 0; i < regd_orig->n_reg_rules; i++)
memcpy(&regd_copy->reg_rules[i], &regd_orig->reg_rules[i],
sizeof(struct ieee80211_reg_rule));
}
int ath12k_regd_update(struct ath12k *ar, bool init)
{
struct ieee80211_regdomain *regd, *regd_copy = NULL;
int ret, regd_len, pdev_id;
struct ath12k_base *ab;
ab = ar->ab;
pdev_id = ar->pdev_idx;
spin_lock_bh(&ab->base_lock);
if (init) {
/* Apply the regd received during init through
* WMI_REG_CHAN_LIST_CC event. In case of failure to
* receive the regd, initialize with a default world
* regulatory.
*/
if (ab->default_regd[pdev_id]) {
regd = ab->default_regd[pdev_id];
} else {
ath12k_warn(ab,
"failed to receive default regd during init\n");
regd = (struct ieee80211_regdomain *)&ath12k_world_regd;
}
} else {
regd = ab->new_regd[pdev_id];
}
if (!regd) {
ret = -EINVAL;
spin_unlock_bh(&ab->base_lock);
goto err;
}
regd_len = sizeof(*regd) + (regd->n_reg_rules *
sizeof(struct ieee80211_reg_rule));
regd_copy = kzalloc(regd_len, GFP_ATOMIC);
if (regd_copy)
ath12k_copy_regd(regd, regd_copy);
spin_unlock_bh(&ab->base_lock);
if (!regd_copy) {
ret = -ENOMEM;
goto err;
}
rtnl_lock();
wiphy_lock(ar->hw->wiphy);
ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy);
wiphy_unlock(ar->hw->wiphy);
rtnl_unlock();
kfree(regd_copy);
if (ret)
goto err;
if (ar->state == ATH12K_STATE_ON) {
ret = ath12k_reg_update_chan_list(ar);
if (ret)
goto err;
}
return 0;
err:
ath12k_warn(ab, "failed to perform regd update : %d\n", ret);
return ret;
}
static enum nl80211_dfs_regions
ath12k_map_fw_dfs_region(enum ath12k_dfs_region dfs_region)
{
switch (dfs_region) {
case ATH12K_DFS_REG_FCC:
case ATH12K_DFS_REG_CN:
return NL80211_DFS_FCC;
case ATH12K_DFS_REG_ETSI:
case ATH12K_DFS_REG_KR:
return NL80211_DFS_ETSI;
case ATH12K_DFS_REG_MKK:
case ATH12K_DFS_REG_MKK_N:
return NL80211_DFS_JP;
default:
return NL80211_DFS_UNSET;
}
}
static u32 ath12k_map_fw_reg_flags(u16 reg_flags)
{
u32 flags = 0;
if (reg_flags & REGULATORY_CHAN_NO_IR)
flags = NL80211_RRF_NO_IR;
if (reg_flags & REGULATORY_CHAN_RADAR)
flags |= NL80211_RRF_DFS;
if (reg_flags & REGULATORY_CHAN_NO_OFDM)
flags |= NL80211_RRF_NO_OFDM;
if (reg_flags & REGULATORY_CHAN_INDOOR_ONLY)
flags |= NL80211_RRF_NO_OUTDOOR;
if (reg_flags & REGULATORY_CHAN_NO_HT40)
flags |= NL80211_RRF_NO_HT40;
if (reg_flags & REGULATORY_CHAN_NO_80MHZ)
flags |= NL80211_RRF_NO_80MHZ;
if (reg_flags & REGULATORY_CHAN_NO_160MHZ)
flags |= NL80211_RRF_NO_160MHZ;
return flags;
}
static bool
ath12k_reg_can_intersect(struct ieee80211_reg_rule *rule1,
struct ieee80211_reg_rule *rule2)
{
u32 start_freq1, end_freq1;
u32 start_freq2, end_freq2;
start_freq1 = rule1->freq_range.start_freq_khz;
start_freq2 = rule2->freq_range.start_freq_khz;
end_freq1 = rule1->freq_range.end_freq_khz;
end_freq2 = rule2->freq_range.end_freq_khz;
if ((start_freq1 >= start_freq2 &&
start_freq1 < end_freq2) ||
(start_freq2 > start_freq1 &&
start_freq2 < end_freq1))
return true;
/* TODO: Should we also restrict intersection feasibility
 * based on the min bandwidth of the intersected region, e.g.
 * require the intersected rule to have a min bandwidth
 * of 20 MHz?
 */
return false;
}
static void ath12k_reg_intersect_rules(struct ieee80211_reg_rule *rule1,
struct ieee80211_reg_rule *rule2,
struct ieee80211_reg_rule *new_rule)
{
u32 start_freq1, end_freq1;
u32 start_freq2, end_freq2;
u32 freq_diff, max_bw;
start_freq1 = rule1->freq_range.start_freq_khz;
start_freq2 = rule2->freq_range.start_freq_khz;
end_freq1 = rule1->freq_range.end_freq_khz;
end_freq2 = rule2->freq_range.end_freq_khz;
new_rule->freq_range.start_freq_khz = max_t(u32, start_freq1,
start_freq2);
new_rule->freq_range.end_freq_khz = min_t(u32, end_freq1, end_freq2);
freq_diff = new_rule->freq_range.end_freq_khz -
new_rule->freq_range.start_freq_khz;
max_bw = min_t(u32, rule1->freq_range.max_bandwidth_khz,
rule2->freq_range.max_bandwidth_khz);
new_rule->freq_range.max_bandwidth_khz = min_t(u32, max_bw, freq_diff);
new_rule->power_rule.max_antenna_gain =
min_t(u32, rule1->power_rule.max_antenna_gain,
rule2->power_rule.max_antenna_gain);
new_rule->power_rule.max_eirp = min_t(u32, rule1->power_rule.max_eirp,
rule2->power_rule.max_eirp);
/* Use the flags of both rules */
new_rule->flags = rule1->flags | rule2->flags;
/* To be safe, let's use the max CAC timeout of both rules */
new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms,
rule2->dfs_cac_ms);
}
static struct ieee80211_regdomain *
ath12k_regd_intersect(struct ieee80211_regdomain *default_regd,
struct ieee80211_regdomain *curr_regd)
{
u8 num_old_regd_rules, num_curr_regd_rules, num_new_regd_rules;
struct ieee80211_reg_rule *old_rule, *curr_rule, *new_rule;
struct ieee80211_regdomain *new_regd = NULL;
u8 i, j, k;
num_old_regd_rules = default_regd->n_reg_rules;
num_curr_regd_rules = curr_regd->n_reg_rules;
num_new_regd_rules = 0;
/* Find the number of intersecting rules to allocate new regd memory */
for (i = 0; i < num_old_regd_rules; i++) {
old_rule = default_regd->reg_rules + i;
for (j = 0; j < num_curr_regd_rules; j++) {
curr_rule = curr_regd->reg_rules + j;
if (ath12k_reg_can_intersect(old_rule, curr_rule))
num_new_regd_rules++;
}
}
if (!num_new_regd_rules)
return NULL;
new_regd = kzalloc(sizeof(*new_regd) + (num_new_regd_rules *
sizeof(struct ieee80211_reg_rule)),
GFP_ATOMIC);
if (!new_regd)
return NULL;
/* We set the new country and dfs region directly and only trim
 * the freq, power and antenna gain by intersecting with the
 * default regdomain. Also, the MAX of the DFS CAC timeouts is selected.
 */
new_regd->n_reg_rules = num_new_regd_rules;
memcpy(new_regd->alpha2, curr_regd->alpha2, sizeof(new_regd->alpha2));
new_regd->dfs_region = curr_regd->dfs_region;
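/* Second pass: fill in the intersected rules counted above */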
new_rule = new_regd->reg_rules;
for (i = 0, k = 0; i < num_old_regd_rules; i++) {
old_rule = default_regd->reg_rules + i;
for (j = 0; j < num_curr_regd_rules; j++) {
curr_rule = curr_regd->reg_rules + j;
if (ath12k_reg_can_intersect(old_rule, curr_rule))
ath12k_reg_intersect_rules(old_rule, curr_rule,
(new_rule + k++));
}
}
return new_regd;
}
static const char *
ath12k_reg_get_regdom_str(enum nl80211_dfs_regions dfs_region)
{
switch (dfs_region) {
case NL80211_DFS_FCC:
return "FCC";
case NL80211_DFS_ETSI:
return "ETSI";
case NL80211_DFS_JP:
return "JP";
default:
return "UNSET";
}
}
static u16
ath12k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
{
u16 bw;
bw = end_freq - start_freq;
bw = min_t(u16, bw, max_bw);
if (bw >= 80 && bw < 160)
bw = 80;
else if (bw >= 40 && bw < 80)
bw = 40;
else if (bw < 40)
bw = 20;
return bw;
}
static void
ath12k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq,
u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr,
u32 reg_flags)
{
reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq);
reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq);
reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw);
reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain);
reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr);
reg_rule->flags = reg_flags;
}
static void
ath12k_reg_update_weather_radar_band(struct ath12k_base *ab,
struct ieee80211_regdomain *regd,
struct ath12k_reg_rule *reg_rule,
u8 *rule_idx, u32 flags, u16 max_bw)
{
u32 end_freq;
u16 bw;
u8 i;
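/* Split a rule that overlaps the ETSI weather radar band into up to
 * three rules: below the band, within it (with the extended CAC
 * timeout) and above it.
 */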
i = *rule_idx;
bw = ath12k_reg_adjust_bw(reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW, max_bw);
ath12k_reg_update_rule(regd->reg_rules + i, reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW, bw,
reg_rule->ant_gain, reg_rule->reg_power,
flags);
ath12k_dbg(ab, ATH12K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, reg_rule->start_freq, ETSI_WEATHER_RADAR_BAND_LOW,
bw, reg_rule->ant_gain, reg_rule->reg_power,
regd->reg_rules[i].dfs_cac_ms,
flags);
if (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_HIGH)
end_freq = ETSI_WEATHER_RADAR_BAND_HIGH;
else
end_freq = reg_rule->end_freq;
bw = ath12k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_LOW, end_freq,
max_bw);
i++;
ath12k_reg_update_rule(regd->reg_rules + i,
ETSI_WEATHER_RADAR_BAND_LOW, end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
flags);
regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
ath12k_dbg(ab, ATH12K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, ETSI_WEATHER_RADAR_BAND_LOW, end_freq,
bw, reg_rule->ant_gain, reg_rule->reg_power,
regd->reg_rules[i].dfs_cac_ms,
flags);
if (end_freq == reg_rule->end_freq) {
regd->n_reg_rules--;
*rule_idx = i;
return;
}
bw = ath12k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_HIGH,
reg_rule->end_freq, max_bw);
i++;
ath12k_reg_update_rule(regd->reg_rules + i, ETSI_WEATHER_RADAR_BAND_HIGH,
reg_rule->end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
flags);
ath12k_dbg(ab, ATH12K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, ETSI_WEATHER_RADAR_BAND_HIGH, reg_rule->end_freq,
bw, reg_rule->ant_gain, reg_rule->reg_power,
regd->reg_rules[i].dfs_cac_ms,
flags);
*rule_idx = i;
}
struct ieee80211_regdomain *
ath12k_reg_build_regd(struct ath12k_base *ab,
struct ath12k_reg_info *reg_info, bool intersect)
{
struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL;
struct ath12k_reg_rule *reg_rule;
u8 i = 0, j = 0, k = 0;
u8 num_rules;
u16 max_bw;
u32 flags;
char alpha2[3];
num_rules = reg_info->num_5g_reg_rules + reg_info->num_2g_reg_rules;
/* FIXME: Currently taking reg rules for 6G only from Indoor AP mode list.
* This can be updated to choose the combination dynamically based on AP
* type and client type, after complete 6G regulatory support is added.
*/
if (reg_info->is_ext_reg_event)
num_rules += reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP];
if (!num_rules)
goto ret;
/* Add max additional rules to accommodate weather radar band */
if (reg_info->dfs_region == ATH12K_DFS_REG_ETSI)
num_rules += 2;
tmp_regd = kzalloc(sizeof(*tmp_regd) +
(num_rules * sizeof(struct ieee80211_reg_rule)),
GFP_ATOMIC);
if (!tmp_regd)
goto ret;
memcpy(tmp_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
memcpy(alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
alpha2[2] = '\0';
tmp_regd->dfs_region = ath12k_map_fw_dfs_region(reg_info->dfs_region);
ath12k_dbg(ab, ATH12K_DBG_REG,
"\r\nCountry %s, CFG Regdomain %s FW Regdomain %d, num_reg_rules %d\n",
alpha2, ath12k_reg_get_regdom_str(tmp_regd->dfs_region),
reg_info->dfs_region, num_rules);
/* Update reg_rules[] below. Firmware is expected to
 * send these rules in order (2G rules first, then 5G)
 */
for (; i < num_rules; i++) {
if (reg_info->num_2g_reg_rules &&
(i < reg_info->num_2g_reg_rules)) {
reg_rule = reg_info->reg_rules_2g_ptr + i;
max_bw = min_t(u16, reg_rule->max_bw,
reg_info->max_bw_2g);
flags = 0;
} else if (reg_info->num_5g_reg_rules &&
(j < reg_info->num_5g_reg_rules)) {
reg_rule = reg_info->reg_rules_5g_ptr + j++;
max_bw = min_t(u16, reg_rule->max_bw,
reg_info->max_bw_5g);
/* FW doesn't pass the NL80211_RRF_AUTO_BW flag for BW
 * auto-correction, so we enable it by default for all
 * 5G rules here. The regulatory core performs BW correction
 * if required and applies the flags according to the other
 * BW rule flags we pass from here
 */
flags = NL80211_RRF_AUTO_BW;
} else if (reg_info->is_ext_reg_event &&
reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] &&
(k < reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP])) {
reg_rule = reg_info->reg_rules_6g_ap_ptr[WMI_REG_INDOOR_AP] + k++;
max_bw = min_t(u16, reg_rule->max_bw,
reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP]);
flags = NL80211_RRF_AUTO_BW;
} else {
break;
}
flags |= ath12k_map_fw_reg_flags(reg_rule->flags);
ath12k_reg_update_rule(tmp_regd->reg_rules + i,
reg_rule->start_freq,
reg_rule->end_freq, max_bw,
reg_rule->ant_gain, reg_rule->reg_power,
flags);
/* Update dfs cac timeout if the dfs domain is ETSI and the
* new rule covers weather radar band.
* Default value of '0' corresponds to 60s timeout, so no
* need to update that for other rules.
*/
if (flags & NL80211_RRF_DFS &&
reg_info->dfs_region == ATH12K_DFS_REG_ETSI &&
(reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_LOW &&
reg_rule->start_freq < ETSI_WEATHER_RADAR_BAND_HIGH)) {
ath12k_reg_update_weather_radar_band(ab, tmp_regd,
reg_rule, &i,
flags, max_bw);
continue;
}
if (reg_info->is_ext_reg_event) {
ath12k_dbg(ab, ATH12K_DBG_REG, "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d) (%d, %d)\n",
i + 1, reg_rule->start_freq, reg_rule->end_freq,
max_bw, reg_rule->ant_gain, reg_rule->reg_power,
tmp_regd->reg_rules[i].dfs_cac_ms,
flags, reg_rule->psd_flag, reg_rule->psd_eirp);
} else {
ath12k_dbg(ab, ATH12K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, reg_rule->start_freq, reg_rule->end_freq,
max_bw, reg_rule->ant_gain, reg_rule->reg_power,
tmp_regd->reg_rules[i].dfs_cac_ms,
flags);
}
}
tmp_regd->n_reg_rules = i;
if (intersect) {
default_regd = ab->default_regd[reg_info->phy_id];
/* Get a new regd by intersecting the received regd with
* our default regd.
*/
new_regd = ath12k_regd_intersect(default_regd, tmp_regd);
kfree(tmp_regd);
if (!new_regd) {
ath12k_warn(ab, "Unable to create intersected regdomain\n");
goto ret;
}
} else {
new_regd = tmp_regd;
}
ret:
return new_regd;
}
void ath12k_regd_update_work(struct work_struct *work)
{
struct ath12k *ar = container_of(work, struct ath12k,
regd_update_work);
int ret;
ret = ath12k_regd_update(ar, false);
if (ret) {
/* Firmware has already moved to the new regd. We need
 * to maintain channel consistency across FW, host driver
 * and userspace. Hence, as a fallback mechanism, we can set
 * the previous or default country code in the firmware.
 */
/* TODO: Implement Fallback Mechanism */
}
}
void ath12k_reg_init(struct ath12k *ar)
{
ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
ar->hw->wiphy->reg_notifier = ath12k_reg_notifier;
}
void ath12k_reg_free(struct ath12k_base *ab)
{
int i;
for (i = 0; i < ab->hw_params->max_radios; i++) {
kfree(ab->default_regd[i]);
kfree(ab->new_regd[i]);
}
}
|
linux-master
|
drivers/net/wireless/ath/ath12k/reg.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/remoteproc.h>
#include <linux/firmware.h>
#include <linux/of.h>
#include "core.h"
#include "dp_tx.h"
#include "dp_rx.h"
#include "debug.h"
#include "hif.h"
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
int ath12k_core_suspend(struct ath12k_base *ab)
{
int ret;
if (!ab->hw_params->supports_suspend)
return -EOPNOTSUPP;
/* TODO: there can be frames in the queues, so for now add a delay
 * as a hack. Need to implement proper handling and then remove
 * this delay.
 */
msleep(500);
ret = ath12k_dp_rx_pktlog_stop(ab, true);
if (ret) {
ath12k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n",
ret);
return ret;
}
ret = ath12k_dp_rx_pktlog_stop(ab, false);
if (ret) {
ath12k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n",
ret);
return ret;
}
ath12k_hif_irq_disable(ab);
ath12k_hif_ce_irq_disable(ab);
ret = ath12k_hif_suspend(ab);
if (ret) {
ath12k_warn(ab, "failed to suspend hif: %d\n", ret);
return ret;
}
return 0;
}
int ath12k_core_resume(struct ath12k_base *ab)
{
int ret;
if (!ab->hw_params->supports_suspend)
return -EOPNOTSUPP;
ret = ath12k_hif_resume(ab);
if (ret) {
ath12k_warn(ab, "failed to resume hif during resume: %d\n", ret);
return ret;
}
ath12k_hif_ce_irq_enable(ab);
ath12k_hif_irq_enable(ab);
ret = ath12k_dp_rx_pktlog_start(ab);
if (ret) {
ath12k_warn(ab, "failed to start rx pktlog during resume: %d\n",
ret);
return ret;
}
return 0;
}
static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
size_t name_len)
{
/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
if (ab->qmi.target.bdf_ext[0] != '\0')
scnprintf(variant, sizeof(variant), ",variant=%s",
ab->qmi.target.bdf_ext);
scnprintf(name, name_len,
"bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
ath12k_bus_str(ab->hif.bus),
ab->qmi.target.chip_id,
ab->qmi.target.board_id, variant);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);
return 0;
}
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *file)
{
const struct firmware *fw;
char path[100];
int ret;
if (!file)
return ERR_PTR(-ENOENT);
ath12k_core_create_firmware_path(ab, file, path, sizeof(path));
ret = firmware_request_nowarn(&fw, path, ab->dev);
if (ret)
return ERR_PTR(ret);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot firmware request %s size %zu\n",
path, fw->size);
return fw;
}
void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
if (!IS_ERR(bd->fw))
release_firmware(bd->fw);
memset(bd, 0, sizeof(*bd));
}
static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
struct ath12k_board_data *bd,
const void *buf, size_t buf_len,
const char *boardname,
int bd_ie_type)
{
const struct ath12k_fw_ie *hdr;
bool name_match_found;
int ret, board_ie_id;
size_t board_ie_len;
const void *board_ie_data;
name_match_found = false;
/* go through ATH12K_BD_IE_BOARD_ elements */
while (buf_len > sizeof(struct ath12k_fw_ie)) {
hdr = buf;
board_ie_id = le32_to_cpu(hdr->id);
board_ie_len = le32_to_cpu(hdr->len);
board_ie_data = hdr->data;
buf_len -= sizeof(*hdr);
buf += sizeof(*hdr);
if (buf_len < ALIGN(board_ie_len, 4)) {
ath12k_err(ab, "invalid ATH12K_BD_IE_BOARD length: %zu < %zu\n",
buf_len, ALIGN(board_ie_len, 4));
ret = -EINVAL;
goto out;
}
switch (board_ie_id) {
case ATH12K_BD_IE_BOARD_NAME:
ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
board_ie_data, board_ie_len);
if (board_ie_len != strlen(boardname))
break;
ret = memcmp(board_ie_data, boardname, strlen(boardname));
if (ret)
break;
name_match_found = true;
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"boot found match for name '%s'",
boardname);
break;
case ATH12K_BD_IE_BOARD_DATA:
if (!name_match_found)
/* no match found */
break;
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"boot found board data for '%s'", boardname);
bd->data = board_ie_data;
bd->len = board_ie_len;
ret = 0;
goto out;
default:
ath12k_warn(ab, "unknown ATH12K_BD_IE_BOARD found: %d\n",
board_ie_id);
break;
}
/* jump over the padding */
board_ie_len = ALIGN(board_ie_len, 4);
buf_len -= board_ie_len;
buf += board_ie_len;
}
/* no match found */
ret = -ENOENT;
out:
return ret;
}
static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
struct ath12k_board_data *bd,
const char *boardname)
{
size_t len, magic_len;
const u8 *data;
char *filename, filepath[100];
size_t ie_len;
struct ath12k_fw_ie *hdr;
int ret, ie_id;
filename = ATH12K_BOARD_API2_FILE;
if (!bd->fw)
bd->fw = ath12k_core_firmware_request(ab, filename);
if (IS_ERR(bd->fw))
return PTR_ERR(bd->fw);
data = bd->fw->data;
len = bd->fw->size;
ath12k_core_create_firmware_path(ab, filename,
filepath, sizeof(filepath));
/* the magic string is padded with an extra null byte */
magic_len = strlen(ATH12K_BOARD_MAGIC) + 1;
if (len < magic_len) {
ath12k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
filepath, len);
ret = -EINVAL;
goto err;
}
if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) {
ath12k_err(ab, "found invalid board magic\n");
ret = -EINVAL;
goto err;
}
/* magic is padded to 4 bytes */
magic_len = ALIGN(magic_len, 4);
if (len < magic_len) {
ath12k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
filepath, len);
ret = -EINVAL;
goto err;
}
data += magic_len;
len -= magic_len;
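/* Walk the board file TLVs: each IE carries a 32-bit id and length
 * followed by a payload padded to 4 bytes.
 */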
while (len > sizeof(struct ath12k_fw_ie)) {
hdr = (struct ath12k_fw_ie *)data;
ie_id = le32_to_cpu(hdr->id);
ie_len = le32_to_cpu(hdr->len);
len -= sizeof(*hdr);
data = hdr->data;
if (len < ALIGN(ie_len, 4)) {
ath12k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
ie_id, ie_len, len);
ret = -EINVAL;
goto err;
}
switch (ie_id) {
case ATH12K_BD_IE_BOARD:
ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
ie_len,
boardname,
ATH12K_BD_IE_BOARD);
if (ret == -ENOENT)
/* no match found, continue */
break;
else if (ret)
/* there was an error, bail out */
goto err;
/* either found or error, so stop searching */
goto out;
}
/* jump over the padding */
ie_len = ALIGN(ie_len, 4);
len -= ie_len;
data += ie_len;
}
out:
if (!bd->data || !bd->len) {
ath12k_err(ab,
"failed to fetch board data for %s from %s\n",
boardname, filepath);
ret = -ENODATA;
goto err;
}
return 0;
err:
ath12k_core_free_bdf(ab, bd);
return ret;
}
int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
struct ath12k_board_data *bd,
char *filename)
{
bd->fw = ath12k_core_firmware_request(ab, filename);
if (IS_ERR(bd->fw))
return PTR_ERR(bd->fw);
bd->data = bd->fw->data;
bd->len = bd->fw->size;
return 0;
}
#define BOARD_NAME_SIZE 100
int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
char boardname[BOARD_NAME_SIZE];
int ret;
ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
if (ret) {
ath12k_err(ab, "failed to create board name: %d", ret);
return ret;
}
ab->bd_api = 2;
ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname);
if (!ret)
goto success;
ab->bd_api = 1;
ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
if (ret) {
ath12k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n",
ab->hw_params->fw.dir);
return ret;
}
success:
ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", ab->bd_api);
return 0;
}
static void ath12k_core_stop(struct ath12k_base *ab)
{
if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
ath12k_qmi_firmware_stop(ab);
ath12k_hif_stop(ab);
ath12k_wmi_detach(ab);
ath12k_dp_rx_pdev_reo_cleanup(ab);
/* De-Init of components as needed */
}
static int ath12k_core_soc_create(struct ath12k_base *ab)
{
int ret;
ret = ath12k_qmi_init_service(ab);
if (ret) {
ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
return ret;
}
ret = ath12k_hif_power_up(ab);
if (ret) {
ath12k_err(ab, "failed to power up :%d\n", ret);
goto err_qmi_deinit;
}
return 0;
err_qmi_deinit:
ath12k_qmi_deinit_service(ab);
return ret;
}
static void ath12k_core_soc_destroy(struct ath12k_base *ab)
{
ath12k_dp_free(ab);
ath12k_reg_free(ab);
ath12k_qmi_deinit_service(ab);
}
static int ath12k_core_pdev_create(struct ath12k_base *ab)
{
int ret;
ret = ath12k_mac_register(ab);
if (ret) {
ath12k_err(ab, "failed register the radio with mac80211: %d\n", ret);
return ret;
}
ret = ath12k_dp_pdev_alloc(ab);
if (ret) {
ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);
goto err_mac_unregister;
}
return 0;
err_mac_unregister:
ath12k_mac_unregister(ab);
return ret;
}
static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
{
ath12k_mac_unregister(ab);
ath12k_hif_irq_disable(ab);
ath12k_dp_pdev_free(ab);
}
static int ath12k_core_start(struct ath12k_base *ab,
enum ath12k_firmware_mode mode)
{
int ret;
ret = ath12k_wmi_attach(ab);
if (ret) {
ath12k_err(ab, "failed to attach wmi: %d\n", ret);
return ret;
}
ret = ath12k_htc_init(ab);
if (ret) {
ath12k_err(ab, "failed to init htc: %d\n", ret);
goto err_wmi_detach;
}
ret = ath12k_hif_start(ab);
if (ret) {
ath12k_err(ab, "failed to start HIF: %d\n", ret);
goto err_wmi_detach;
}
ret = ath12k_htc_wait_target(&ab->htc);
if (ret) {
ath12k_err(ab, "failed to connect to HTC: %d\n", ret);
goto err_hif_stop;
}
ret = ath12k_dp_htt_connect(&ab->dp);
if (ret) {
ath12k_err(ab, "failed to connect to HTT: %d\n", ret);
goto err_hif_stop;
}
ret = ath12k_wmi_connect(ab);
if (ret) {
ath12k_err(ab, "failed to connect wmi: %d\n", ret);
goto err_hif_stop;
}
ret = ath12k_htc_start(&ab->htc);
if (ret) {
ath12k_err(ab, "failed to start HTC: %d\n", ret);
goto err_hif_stop;
}
ret = ath12k_wmi_wait_for_service_ready(ab);
if (ret) {
ath12k_err(ab, "failed to receive wmi service ready event: %d\n",
ret);
goto err_hif_stop;
}
ret = ath12k_mac_allocate(ab);
if (ret) {
ath12k_err(ab, "failed to create new hw device with mac80211 :%d\n",
ret);
goto err_hif_stop;
}
ath12k_dp_cc_config(ab);
ath12k_dp_pdev_pre_alloc(ab);
ret = ath12k_dp_rx_pdev_reo_setup(ab);
if (ret) {
ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
goto err_mac_destroy;
}
ret = ath12k_wmi_cmd_init(ab);
if (ret) {
ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
goto err_reo_cleanup;
}
ret = ath12k_wmi_wait_for_unified_ready(ab);
if (ret) {
ath12k_err(ab, "failed to receive wmi unified ready event: %d\n",
ret);
goto err_reo_cleanup;
}
/* put the hardware into DBS mode */
if (ab->hw_params->single_pdev_only) {
ret = ath12k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
if (ret) {
ath12k_err(ab, "failed to send dbs mode: %d\n", ret);
goto err_reo_cleanup;
}
}
ret = ath12k_dp_tx_htt_h2t_ver_req_msg(ab);
if (ret) {
ath12k_err(ab, "failed to send htt version request message: %d\n",
ret);
goto err_reo_cleanup;
}
return 0;
err_reo_cleanup:
ath12k_dp_rx_pdev_reo_cleanup(ab);
err_mac_destroy:
ath12k_mac_destroy(ab);
err_hif_stop:
ath12k_hif_stop(ab);
err_wmi_detach:
ath12k_wmi_detach(ab);
return ret;
}
static int ath12k_core_start_firmware(struct ath12k_base *ab,
enum ath12k_firmware_mode mode)
{
int ret;
ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
&ab->qmi.ce_cfg.shadow_reg_v3_len);
ret = ath12k_qmi_firmware_start(ab, mode);
if (ret) {
ath12k_err(ab, "failed to send firmware start: %d\n", ret);
return ret;
}
return ret;
}
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
{
int ret;
ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL);
if (ret) {
ath12k_err(ab, "failed to start firmware: %d\n", ret);
return ret;
}
ret = ath12k_ce_init_pipes(ab);
if (ret) {
ath12k_err(ab, "failed to initialize CE: %d\n", ret);
goto err_firmware_stop;
}
ret = ath12k_dp_alloc(ab);
if (ret) {
ath12k_err(ab, "failed to init DP: %d\n", ret);
goto err_firmware_stop;
}
mutex_lock(&ab->core_lock);
ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL);
if (ret) {
ath12k_err(ab, "failed to start core: %d\n", ret);
goto err_dp_free;
}
ret = ath12k_core_pdev_create(ab);
if (ret) {
ath12k_err(ab, "failed to create pdev core: %d\n", ret);
goto err_core_stop;
}
ath12k_hif_irq_enable(ab);
mutex_unlock(&ab->core_lock);
return 0;
err_core_stop:
ath12k_core_stop(ab);
ath12k_mac_destroy(ab);
err_dp_free:
ath12k_dp_free(ab);
mutex_unlock(&ab->core_lock);
err_firmware_stop:
ath12k_qmi_firmware_stop(ab);
return ret;
}
static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
{
int ret;
mutex_lock(&ab->core_lock);
ath12k_hif_irq_disable(ab);
ath12k_dp_pdev_free(ab);
ath12k_hif_stop(ab);
ath12k_wmi_detach(ab);
ath12k_dp_rx_pdev_reo_cleanup(ab);
mutex_unlock(&ab->core_lock);
ath12k_dp_free(ab);
ath12k_hal_srng_deinit(ab);
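/* mark every vdev id on all radios as free again before restarting */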
ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
ret = ath12k_hal_srng_init(ab);
if (ret)
return ret;
clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
ret = ath12k_core_qmi_firmware_ready(ab);
if (ret)
goto err_hal_srng_deinit;
clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
return 0;
err_hal_srng_deinit:
ath12k_hal_srng_deinit(ab);
return ret;
}
void ath12k_core_halt(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
lockdep_assert_held(&ar->conf_mutex);
ar->num_created_vdevs = 0;
ar->allocated_vdev_map = 0;
ath12k_mac_scan_finish(ar);
ath12k_mac_peer_cleanup_all(ar);
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->regd_update_work);
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
synchronize_rcu();
INIT_LIST_HEAD(&ar->arvifs);
idr_init(&ar->txmgmt_idr);
}
static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
struct ath12k *ar;
struct ath12k_pdev *pdev;
int i;
spin_lock_bh(&ab->base_lock);
ab->stats.fw_crash_counter++;
spin_unlock_bh(&ab->base_lock);
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (!ar || ar->state == ATH12K_STATE_OFF)
continue;
ieee80211_stop_queues(ar->hw);
ath12k_mac_drain_tx(ar);
complete(&ar->scan.started);
complete(&ar->scan.completed);
complete(&ar->peer_assoc_done);
complete(&ar->peer_delete_done);
complete(&ar->install_key_done);
complete(&ar->vdev_setup_done);
complete(&ar->vdev_delete_done);
complete(&ar->bss_survey_done);
wake_up(&ar->dp.tx_empty_waitq);
idr_for_each(&ar->txmgmt_idr,
ath12k_mac_tx_mgmt_pending_free, ar);
idr_destroy(&ar->txmgmt_idr);
wake_up(&ar->txmgmt_empty_waitq);
}
wake_up(&ab->wmi_ab.tx_credits_wq);
wake_up(&ab->peer_mapping_wq);
}
static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
{
struct ath12k *ar;
struct ath12k_pdev *pdev;
int i;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (!ar || ar->state == ATH12K_STATE_OFF)
continue;
mutex_lock(&ar->conf_mutex);
switch (ar->state) {
case ATH12K_STATE_ON:
ar->state = ATH12K_STATE_RESTARTING;
ath12k_core_halt(ar);
ieee80211_restart_hw(ar->hw);
break;
case ATH12K_STATE_OFF:
ath12k_warn(ab,
"cannot restart radio %d that hasn't been started\n",
i);
break;
case ATH12K_STATE_RESTARTING:
break;
case ATH12K_STATE_RESTARTED:
ar->state = ATH12K_STATE_WEDGED;
fallthrough;
case ATH12K_STATE_WEDGED:
ath12k_warn(ab,
"device is wedged, will not restart radio %d\n", i);
break;
}
mutex_unlock(&ar->conf_mutex);
}
complete(&ab->driver_recovery);
}
static void ath12k_core_restart(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
int ret;
if (!ab->is_reset)
ath12k_core_pre_reconfigure_recovery(ab);
ret = ath12k_core_reconfigure_on_crash(ab);
if (ret) {
ath12k_err(ab, "failed to reconfigure driver on crash recovery\n");
return;
}
if (ab->is_reset)
complete_all(&ab->reconfigure_complete);
if (!ab->is_reset)
ath12k_core_post_reconfigure_recovery(ab);
}
static void ath12k_core_reset(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
int reset_count, fail_cont_count;
long time_left;
if (!(test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))) {
ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
return;
}
/* Sometimes recovery fails and then every subsequent recovery attempt
* fails too; bail out here to avoid looping forever on a recovery that
* cannot succeed.
*/
fail_cont_count = atomic_read(&ab->fail_cont_count);
if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL)
return;
if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FIRST &&
time_before(jiffies, ab->reset_fail_timeout))
return;
reset_count = atomic_inc_return(&ab->reset_count);
if (reset_count > 1) {
/* Sometimes another reset worker is scheduled before the previous one
* has completed; without the wait below the second worker would destroy
* the state the first one is still using.
*/
ath12k_warn(ab, "already resetting count %d\n", reset_count);
reinit_completion(&ab->reset_complete);
time_left = wait_for_completion_timeout(&ab->reset_complete,
ATH12K_RESET_TIMEOUT_HZ);
if (time_left) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "to skip reset\n");
atomic_dec(&ab->reset_count);
return;
}
ab->reset_fail_timeout = jiffies + ATH12K_RESET_FAIL_TIMEOUT_HZ;
/* Record the consecutive recovery failure count when recovery fails */
fail_cont_count = atomic_inc_return(&ab->fail_cont_count);
}
ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");
ab->is_reset = true;
atomic_set(&ab->recovery_count, 0);
ath12k_core_pre_reconfigure_recovery(ab);
reinit_completion(&ab->reconfigure_complete);
ath12k_core_post_reconfigure_recovery(ab);
reinit_completion(&ab->recovery_start);
atomic_set(&ab->recovery_start_count, 0);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");
time_left = wait_for_completion_timeout(&ab->recovery_start,
ATH12K_RECOVER_START_TIMEOUT_HZ);
ath12k_hif_power_down(ab);
ath12k_hif_power_up(ab);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
}
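/* Rough sketch of the recovery flow above (a reading aid, not new behavior):
*
*   firmware crash -> ath12k_core_reset() (reset_work):
*     pre_reconfigure:  stop queues, drain tx, complete all waiters
*     post_reconfigure: halt each radio, ieee80211_restart_hw()
*     power cycle:      ath12k_hif_power_down() + ath12k_hif_power_up()
*
*   ath12k_core_restart() (restart_work, queued elsewhere in the driver)
*   then runs ath12k_core_reconfigure_on_crash() to tear down and rebuild
*   the DP/HAL/WMI state before the radios come back up.
*/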
int ath12k_core_pre_init(struct ath12k_base *ab)
{
int ret;
ret = ath12k_hw_init(ab);
if (ret) {
ath12k_err(ab, "failed to init hw params: %d\n", ret);
return ret;
}
return 0;
}
int ath12k_core_init(struct ath12k_base *ab)
{
int ret;
ret = ath12k_core_soc_create(ab);
if (ret) {
ath12k_err(ab, "failed to create soc core: %d\n", ret);
return ret;
}
return 0;
}
void ath12k_core_deinit(struct ath12k_base *ab)
{
mutex_lock(&ab->core_lock);
ath12k_core_pdev_destroy(ab);
ath12k_core_stop(ab);
mutex_unlock(&ab->core_lock);
ath12k_hif_power_down(ab);
ath12k_mac_destroy(ab);
ath12k_core_soc_destroy(ab);
}
void ath12k_core_free(struct ath12k_base *ab)
{
timer_delete_sync(&ab->rx_replenish_retry);
destroy_workqueue(ab->workqueue_aux);
destroy_workqueue(ab->workqueue);
kfree(ab);
}
struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
enum ath12k_bus bus)
{
struct ath12k_base *ab;
ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
if (!ab)
return NULL;
init_completion(&ab->driver_recovery);
ab->workqueue = create_singlethread_workqueue("ath12k_wq");
if (!ab->workqueue)
goto err_sc_free;
ab->workqueue_aux = create_singlethread_workqueue("ath12k_aux_wq");
if (!ab->workqueue_aux)
goto err_free_wq;
mutex_init(&ab->core_lock);
spin_lock_init(&ab->base_lock);
init_completion(&ab->reset_complete);
init_completion(&ab->reconfigure_complete);
init_completion(&ab->recovery_start);
INIT_LIST_HEAD(&ab->peers);
init_waitqueue_head(&ab->peer_mapping_wq);
init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
INIT_WORK(&ab->restart_work, ath12k_core_restart);
INIT_WORK(&ab->reset_work, ath12k_core_reset);
timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
ab->dev = dev;
ab->hif.bus = bus;
return ab;
err_free_wq:
destroy_workqueue(ab->workqueue);
err_sc_free:
kfree(ab);
return NULL;
}
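/* Usage sketch (hypothetical bus glue, not part of this file): a PCI probe
* path would typically allocate the base struct together with its
* bus-private area and free it again on failure:
*
*   ab = ath12k_core_alloc(&pdev->dev, sizeof(struct ath12k_pci),
*                          ATH12K_BUS_PCI);
*   if (!ab)
*       return -ENOMEM;
*   ...
*   ath12k_core_free(ab);
*/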
MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11be wireless LAN cards.");
MODULE_LICENSE("Dual BSD/GPL");
|
linux-master
|
drivers/net/wireless/ath/ath12k/core.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
#include "core.h"
#include "debug.h"
void ath12k_info(struct ath12k_base *ab, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
va_start(args, fmt);
vaf.va = &args;
dev_info(ab->dev, "%pV", &vaf);
/* TODO: Trace the log */
va_end(args);
}
void ath12k_err(struct ath12k_base *ab, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
va_start(args, fmt);
vaf.va = &args;
dev_err(ab->dev, "%pV", &vaf);
/* TODO: Trace the log */
va_end(args);
}
void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
va_start(args, fmt);
vaf.va = &args;
dev_warn_ratelimited(ab->dev, "%pV", &vaf);
/* TODO: Trace the log */
va_end(args);
}
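/* Usage sketch: the helpers above take printf-style arguments and forward
* them via the %pV va_format mechanism, e.g. (hypothetical call site):
*
*   ath12k_warn(ab, "failed to setup ring %d: %d\n", ring_id, ret);
*
* Note that ath12k_warn() is ratelimited (dev_warn_ratelimited) while
* ath12k_info()/ath12k_err() are not.
*/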
#ifdef CONFIG_ATH12K_DEBUG
void __ath12k_dbg(struct ath12k_base *ab, enum ath12k_debug_mask mask,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
if (ath12k_debug_mask & mask)
dev_dbg(ab->dev, "%pV", &vaf);
/* TODO: trace log */
va_end(args);
}
void ath12k_dbg_dump(struct ath12k_base *ab,
enum ath12k_debug_mask mask,
const char *msg, const char *prefix,
const void *buf, size_t len)
{
char linebuf[256];
size_t linebuflen;
const void *ptr;
if (ath12k_debug_mask & mask) {
if (msg)
__ath12k_dbg(ab, mask, "%s\n", msg);
for (ptr = buf; (ptr - buf) < len; ptr += 16) {
linebuflen = 0;
linebuflen += scnprintf(linebuf + linebuflen,
sizeof(linebuf) - linebuflen,
"%s%08x: ",
(prefix ? prefix : ""),
(unsigned int)(ptr - buf));
hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1,
linebuf + linebuflen,
sizeof(linebuf) - linebuflen, true);
dev_dbg(ab->dev, "%s\n", linebuf);
}
}
}
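/* Usage sketch (hypothetical call site): hex-dump a buffer 16 bytes per
* line, gated on the same debug mask as __ath12k_dbg():
*
*   ath12k_dbg_dump(ab, ATH12K_DBG_DP_HTT, "htt msg:", "", skb->data,
*                   skb->len);
*/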
#endif /* CONFIG_ATH12K_DEBUG */
|
linux-master
|
drivers/net/wireless/ath/ath12k/debug.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"
#include "dp_mon.h"
#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
if (!ab->hw_params->hal_ops->rx_desc_encrypt_valid(desc))
return HAL_ENCRYPT_TYPE_OPEN;
return ab->hw_params->hal_ops->rx_desc_get_encrypt_type(desc);
}
u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_decap_type(desc);
}
static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_mesh_ctl(desc);
}
static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}
static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_mpdu_fc_valid(desc);
}
static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
return ieee80211_has_morefrags(hdr->frame_control);
}
static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}
static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_mpdu_start_seq_no(desc);
}
static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->dp_rx_h_msdu_done(desc);
}
static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->dp_rx_h_l4_cksum_fail(desc);
}
static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->dp_rx_h_ip_cksum_fail(desc);
}
static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->dp_rx_h_is_decrypted(desc);
}
u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->dp_rx_h_mpdu_err(desc);
}
static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_msdu_len(desc);
}
static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_msdu_sgi(desc);
}
static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_msdu_rate_mcs(desc);
}
static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_msdu_rx_bw(desc);
}
static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_msdu_freq(desc);
}
static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_msdu_pkt_type(desc);
}
static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return hweight8(ab->hw_params->hal_ops->rx_desc_get_msdu_nss(desc));
}
static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_mpdu_tid(desc);
}
static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_mpdu_peer_id(desc);
}
u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_l3_pad_bytes(desc);
}
static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_first_msdu(desc);
}
static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_last_msdu(desc);
}
static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,
struct hal_rx_desc *fdesc,
struct hal_rx_desc *ldesc)
{
ab->hw_params->hal_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
}
static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
struct hal_rx_desc *desc,
u16 len)
{
ab->hw_params->hal_ops->rx_desc_set_msdu_len(desc, len);
}
static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return (ath12k_dp_rx_h_first_msdu(ab, desc) &&
ab->hw_params->hal_ops->rx_desc_is_da_mcbc(desc));
}
static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_mac_addr2_valid(desc);
}
static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_mpdu_start_addr2(desc);
}
static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,
struct hal_rx_desc *desc,
struct ieee80211_hdr *hdr)
{
ab->hw_params->hal_ops->rx_desc_get_dot11_hdr(desc, hdr);
}
static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
struct hal_rx_desc *desc,
u8 *crypto_hdr,
enum hal_encrypt_type enctype)
{
ab->hw_params->hal_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
}
static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return ab->hw_params->hal_ops->rx_desc_get_mpdu_frame_ctl(desc);
}
static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab)
{
int i, reaped = 0;
unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
do {
for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
reaped += ath12k_dp_mon_process_ring(ab, i, NULL,
DP_MON_SERVICE_BUDGET,
ATH12K_DP_RX_MONITOR_MODE);
/* nothing more to reap */
if (reaped < DP_MON_SERVICE_BUDGET)
return 0;
} while (time_before(jiffies, timeout));
ath12k_warn(ab, "dp mon ring purge timeout");
return -ETIMEDOUT;
}
/* Returns number of Rx buffers replenished */
int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id,
struct dp_rxdma_ring *rx_ring,
int req_entries,
enum hal_rx_buf_return_buf_manager mgr,
bool hw_cc)
{
struct ath12k_buffer_addr *desc;
struct hal_srng *srng;
struct sk_buff *skb;
int num_free;
int num_remain;
int buf_id;
u32 cookie;
dma_addr_t paddr;
struct ath12k_dp *dp = &ab->dp;
struct ath12k_rx_desc_info *rx_desc;
req_entries = min(req_entries, rx_ring->bufs_max);
srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
req_entries = num_free;
req_entries = min(num_free, req_entries);
num_remain = req_entries;
while (num_remain > 0) {
skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
DP_RX_BUFFER_ALIGN_SIZE);
if (!skb)
break;
if (!IS_ALIGNED((unsigned long)skb->data,
DP_RX_BUFFER_ALIGN_SIZE)) {
skb_pull(skb,
PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
skb->data);
}
paddr = dma_map_single(ab->dev, skb->data,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
if (dma_mapping_error(ab->dev, paddr))
goto fail_free_skb;
if (hw_cc) {
spin_lock_bh(&dp->rx_desc_lock);
/* Get desc from free list and store in used list
* for cleanup purposes
*
* TODO: pass the removed descs rather than
* add/read to optimize
*/
rx_desc = list_first_entry_or_null(&dp->rx_desc_free_list,
struct ath12k_rx_desc_info,
list);
if (!rx_desc) {
spin_unlock_bh(&dp->rx_desc_lock);
goto fail_dma_unmap;
}
rx_desc->skb = skb;
cookie = rx_desc->cookie;
list_del(&rx_desc->list);
list_add_tail(&rx_desc->list, &dp->rx_desc_used_list);
spin_unlock_bh(&dp->rx_desc_lock);
} else {
spin_lock_bh(&rx_ring->idr_lock);
buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
rx_ring->bufs_max * 3, GFP_ATOMIC);
spin_unlock_bh(&rx_ring->idr_lock);
if (buf_id < 0)
goto fail_dma_unmap;
cookie = u32_encode_bits(mac_id,
DP_RXDMA_BUF_COOKIE_PDEV_ID) |
u32_encode_bits(buf_id,
DP_RXDMA_BUF_COOKIE_BUF_ID);
}
desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
if (!desc)
goto fail_buf_unassign;
ATH12K_SKB_RXCB(skb)->paddr = paddr;
num_remain--;
ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
}
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return req_entries - num_remain;
fail_buf_unassign:
if (hw_cc) {
spin_lock_bh(&dp->rx_desc_lock);
list_del(&rx_desc->list);
list_add_tail(&rx_desc->list, &dp->rx_desc_free_list);
rx_desc->skb = NULL;
spin_unlock_bh(&dp->rx_desc_lock);
} else {
spin_lock_bh(&rx_ring->idr_lock);
idr_remove(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
}
fail_dma_unmap:
dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
fail_free_skb:
dev_kfree_skb_any(skb);
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return req_entries - num_remain;
}
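/* Usage sketch (hypothetical caller): after reaping N entries from a REO
* destination ring, the rx path would typically top the refill ring back up:
*
*   ath12k_dp_rx_bufs_replenish(ab, mac_id, &ab->dp.rx_refill_buf_ring,
*                               num_reaped,
*                               ab->hw_params->hal_params->rx_buf_rbm,
*                               true);
*
* Passing req_entries == 0 lets the function refill opportunistically once
* the ring is more than 3/4 empty (see the num_free check above).
*/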
static int ath12k_dp_rxdma_buf_ring_free(struct ath12k_base *ab,
struct dp_rxdma_ring *rx_ring)
{
struct sk_buff *skb;
int buf_id;
spin_lock_bh(&rx_ring->idr_lock);
idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
idr_remove(&rx_ring->bufs_idr, buf_id);
/* TODO: Understand where internal driver does this dma_unmap
* of rxdma_buffer.
*/
dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
idr_destroy(&rx_ring->bufs_idr);
spin_unlock_bh(&rx_ring->idr_lock);
return 0;
}
static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);
rx_ring = &dp->rxdma_mon_buf_ring;
ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);
rx_ring = &dp->tx_mon_buf_ring;
ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);
return 0;
}
static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
struct dp_rxdma_ring *rx_ring,
u32 ringtype)
{
int num_entries;
num_entries = rx_ring->refill_buf_ring.size /
ath12k_hal_srng_get_entrysize(ab, ringtype);
rx_ring->bufs_max = num_entries;
if ((ringtype == HAL_RXDMA_MONITOR_BUF) || (ringtype == HAL_TX_MONITOR_BUF))
ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
else
ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_entries,
ab->hw_params->hal_params->rx_buf_rbm,
ringtype == HAL_RXDMA_BUF);
return 0;
}
static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
int ret;
ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
HAL_RXDMA_BUF);
if (ret) {
ath12k_warn(ab,
"failed to setup HAL_RXDMA_BUF\n");
return ret;
}
if (ab->hw_params->rxdma1_enable) {
rx_ring = &dp->rxdma_mon_buf_ring;
ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
HAL_RXDMA_MONITOR_BUF);
if (ret) {
ath12k_warn(ab,
"failed to setup HAL_RXDMA_MONITOR_BUF\n");
return ret;
}
rx_ring = &dp->tx_mon_buf_ring;
ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
HAL_TX_MONITOR_BUF);
if (ret) {
ath12k_warn(ab,
"failed to setup HAL_TX_MONITOR_BUF\n");
return ret;
}
}
return 0;
}
static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
{
struct ath12k_pdev_dp *dp = &ar->dp;
struct ath12k_base *ab = ar->ab;
int i;
for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
ath12k_dp_srng_cleanup(ab, &dp->tx_mon_dst_ring[i]);
}
}
void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
int i;
for (i = 0; i < DP_REO_DST_RING_MAX; i++)
ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}
int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
int ret;
int i;
for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
HAL_REO_DST, i, 0,
DP_REO_DST_RING_SIZE);
if (ret) {
ath12k_warn(ab, "failed to setup reo_dst_ring\n");
goto err_reo_cleanup;
}
}
return 0;
err_reo_cleanup:
ath12k_dp_rx_pdev_reo_cleanup(ab);
return ret;
}
static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
{
struct ath12k_pdev_dp *dp = &ar->dp;
struct ath12k_base *ab = ar->ab;
int i;
int ret;
u32 mac_id = dp->mac_id;
for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
ret = ath12k_dp_srng_setup(ar->ab,
&dp->rxdma_mon_dst_ring[i],
HAL_RXDMA_MONITOR_DST,
0, mac_id + i,
DP_RXDMA_MONITOR_DST_RING_SIZE);
if (ret) {
ath12k_warn(ar->ab,
"failed to setup HAL_RXDMA_MONITOR_DST\n");
return ret;
}
ret = ath12k_dp_srng_setup(ar->ab,
&dp->tx_mon_dst_ring[i],
HAL_TX_MONITOR_DST,
0, mac_id + i,
DP_TX_MONITOR_DEST_RING_SIZE);
if (ret) {
ath12k_warn(ar->ab,
"failed to setup HAL_TX_MONITOR_DST\n");
return ret;
}
}
return 0;
}
void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;
spin_lock_bh(&dp->reo_cmd_lock);
list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
list_del(&cmd->list);
dma_unmap_single(ab->dev, cmd->data.paddr,
cmd->data.size, DMA_BIDIRECTIONAL);
kfree(cmd->data.vaddr);
kfree(cmd);
}
list_for_each_entry_safe(cmd_cache, tmp_cache,
&dp->reo_cmd_cache_flush_list, list) {
list_del(&cmd_cache->list);
dp->reo_cmd_cache_flush_count--;
dma_unmap_single(ab->dev, cmd_cache->data.paddr,
cmd_cache->data.size, DMA_BIDIRECTIONAL);
kfree(cmd_cache->data.vaddr);
kfree(cmd_cache);
}
spin_unlock_bh(&dp->reo_cmd_lock);
}
static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
enum hal_reo_cmd_status status)
{
struct ath12k_dp_rx_tid *rx_tid = ctx;
if (status != HAL_REO_CMD_SUCCESS)
ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
rx_tid->tid, status);
dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
}
static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
enum hal_reo_cmd_type type,
struct ath12k_hal_reo_cmd *cmd,
void (*cb)(struct ath12k_dp *dp, void *ctx,
enum hal_reo_cmd_status status))
{
struct ath12k_dp *dp = &ab->dp;
struct ath12k_dp_rx_reo_cmd *dp_cmd;
struct hal_srng *cmd_ring;
int cmd_num;
cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
/* cmd_num should start from 1; on failure, return the error code */
if (cmd_num < 0)
return cmd_num;
/* reo cmd ring descriptors have cmd_num starting from 1 */
if (cmd_num == 0)
return -EINVAL;
if (!cb)
return 0;
/* Can this be optimized so that we keep the pending command list only
* for tid delete command to free up the resource on the command status
* indication?
*/
dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
if (!dp_cmd)
return -ENOMEM;
memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));
dp_cmd->cmd_num = cmd_num;
dp_cmd->handler = cb;
spin_lock_bh(&dp->reo_cmd_lock);
list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
spin_unlock_bh(&dp->reo_cmd_lock);
return 0;
}
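/* Usage sketch: callers fill an ath12k_hal_reo_cmd and either fire and
* forget (cb == NULL) or register a completion handler that runs when the
* REO status for cmd_num arrives, e.g. (mirroring the flush path below):
*
*   struct ath12k_hal_reo_cmd cmd = {0};
*
*   cmd.addr_lo = lower_32_bits(rx_tid->paddr);
*   cmd.addr_hi = upper_32_bits(rx_tid->paddr);
*   cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
*   ret = ath12k_dp_reo_cmd_send(ab, rx_tid, HAL_REO_CMD_FLUSH_CACHE,
*                                &cmd, ath12k_dp_reo_cmd_free);
*/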
static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
struct ath12k_dp_rx_tid *rx_tid)
{
struct ath12k_hal_reo_cmd cmd = {0};
unsigned long tot_desc_sz, desc_sz;
int ret;
tot_desc_sz = rx_tid->size;
desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
while (tot_desc_sz > desc_sz) {
tot_desc_sz -= desc_sz;
cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
cmd.addr_hi = upper_32_bits(rx_tid->paddr);
ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
HAL_REO_CMD_FLUSH_CACHE, &cmd,
NULL);
if (ret)
ath12k_warn(ab,
"failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
rx_tid->tid, ret);
}
memset(&cmd, 0, sizeof(cmd));
cmd.addr_lo = lower_32_bits(rx_tid->paddr);
cmd.addr_hi = upper_32_bits(rx_tid->paddr);
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
HAL_REO_CMD_FLUSH_CACHE,
&cmd, ath12k_dp_reo_cmd_free);
if (ret) {
ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
rx_tid->tid, ret);
dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
}
}
static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
enum hal_reo_cmd_status status)
{
struct ath12k_base *ab = dp->ab;
struct ath12k_dp_rx_tid *rx_tid = ctx;
struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;
if (status == HAL_REO_CMD_DRAIN) {
goto free_desc;
} else if (status != HAL_REO_CMD_SUCCESS) {
/* Shouldn't happen! Cleanup in case of other failure? */
ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
rx_tid->tid, status);
return;
}
elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
if (!elem)
goto free_desc;
elem->ts = jiffies;
memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
spin_lock_bh(&dp->reo_cmd_lock);
list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
dp->reo_cmd_cache_flush_count++;
/* Flush and invalidate aged REO desc from HW cache */
list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
list) {
if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
time_after(jiffies, elem->ts +
msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
list_del(&elem->list);
dp->reo_cmd_cache_flush_count--;
/* Unlock the reo_cmd_lock before calling ath12k_dp_reo_cmd_send()
* within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list
* is used in only two contexts: here, called from napi, and in
* ath12k_dp_free during core destroy. Before dp_free the irqs are
* disabled and we wait to synchronize, so there cannot be a race
* against an add or delete to this list. Hence the unlock-lock
* here is safe.
*/
spin_unlock_bh(&dp->reo_cmd_lock);
ath12k_dp_reo_cache_flush(ab, &elem->data);
kfree(elem);
spin_lock_bh(&dp->reo_cmd_lock);
}
}
spin_unlock_bh(&dp->reo_cmd_lock);
return;
free_desc:
dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
}
static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
dma_addr_t paddr)
{
struct ath12k_reo_queue_ref *qref;
struct ath12k_dp *dp = &ab->dp;
if (!ab->hw_params->reoq_lut_support)
return;
/* TODO: select the LUT based on whether this is an ML peer;
* below assumes a non-ML peer.
*/
qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
qref->info0 = u32_encode_bits(lower_32_bits(paddr),
BUFFER_ADDR_INFO0_ADDR);
qref->info1 = u32_encode_bits(upper_32_bits(paddr),
BUFFER_ADDR_INFO1_ADDR) |
u32_encode_bits(tid, DP_REO_QREF_NUM);
}
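/* Illustration: the LUT above is a flat array of qrefs indexed by
* (peer_id, tid) with IEEE80211_NUM_TIDS + 1 == 17 slots per peer, so
* e.g. peer_id 5, tid 3 lands at entry 5 * 17 + 3 == 88.
*/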
static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
{
struct ath12k_reo_queue_ref *qref;
struct ath12k_dp *dp = &ab->dp;
if (!ab->hw_params->reoq_lut_support)
return;
/* TODO: select the LUT based on whether this is an ML peer;
* below assumes a non-ML peer.
*/
qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
u32_encode_bits(tid, DP_REO_QREF_NUM);
}
void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
struct ath12k_peer *peer, u8 tid)
{
struct ath12k_hal_reo_cmd cmd = {0};
struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
int ret;
if (!rx_tid->active)
return;
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
cmd.addr_lo = lower_32_bits(rx_tid->paddr);
cmd.addr_hi = upper_32_bits(rx_tid->paddr);
cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
ath12k_dp_rx_tid_del_func);
if (ret) {
ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
tid, ret);
dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
}
ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
rx_tid->active = false;
}
/* TODO: it's strange (and ugly) that struct hal_reo_dest_ring is converted
* to struct hal_wbm_release_ring, I couldn't figure out the logic behind
* that.
*/
static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
struct hal_reo_dest_ring *ring,
enum hal_wbm_rel_bm_act action)
{
struct hal_wbm_release_ring *link_desc = (struct hal_wbm_release_ring *)ring;
struct hal_wbm_release_ring *desc;
struct ath12k_dp *dp = &ab->dp;
struct hal_srng *srng;
int ret = 0;
srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
if (!desc) {
ret = -ENOBUFS;
goto exit;
}
ath12k_hal_rx_msdu_link_desc_set(ab, desc, link_desc, action);
exit:
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return ret;
}
static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
bool rel_link_desc)
{
struct ath12k_base *ab = rx_tid->ab;
lockdep_assert_held(&ab->base_lock);
if (rx_tid->dst_ring_desc) {
if (rel_link_desc)
ath12k_dp_rx_link_desc_return(ab, rx_tid->dst_ring_desc,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
kfree(rx_tid->dst_ring_desc);
rx_tid->dst_ring_desc = NULL;
}
rx_tid->cur_sn = 0;
rx_tid->last_frag_no = 0;
rx_tid->rx_frag_bitmap = 0;
__skb_queue_purge(&rx_tid->rx_frags);
}
void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer)
{
struct ath12k_dp_rx_tid *rx_tid;
int i;
lockdep_assert_held(&ar->ab->base_lock);
for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
rx_tid = &peer->rx_tid[i];
ath12k_dp_rx_peer_tid_delete(ar, peer, i);
ath12k_dp_rx_frags_cleanup(rx_tid, true);
spin_unlock_bh(&ar->ab->base_lock);
del_timer_sync(&rx_tid->frag_timer);
spin_lock_bh(&ar->ab->base_lock);
}
}
static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
struct ath12k_peer *peer,
struct ath12k_dp_rx_tid *rx_tid,
u32 ba_win_sz, u16 ssn,
bool update_ssn)
{
struct ath12k_hal_reo_cmd cmd = {0};
int ret;
cmd.addr_lo = lower_32_bits(rx_tid->paddr);
cmd.addr_hi = upper_32_bits(rx_tid->paddr);
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
cmd.ba_window_size = ba_win_sz;
if (update_ssn) {
cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
}
ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
NULL);
if (ret) {
ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
rx_tid->tid, ret);
return ret;
}
rx_tid->ba_win_sz = ba_win_sz;
return 0;
}
int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
u8 tid, u32 ba_win_sz, u16 ssn,
enum hal_pn_type pn_type)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = &ab->dp;
struct hal_rx_reo_queue *addr_aligned;
struct ath12k_peer *peer;
struct ath12k_dp_rx_tid *rx_tid;
u32 hw_desc_sz;
void *vaddr;
dma_addr_t paddr;
int ret;
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find(ab, vdev_id, peer_mac);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
ath12k_warn(ab, "failed to find the peer to set up rx tid\n");
return -ENOENT;
}
if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) {
spin_unlock_bh(&ab->base_lock);
ath12k_warn(ab, "reo qref table is not setup\n");
return -EINVAL;
}
if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) {
ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n",
peer->peer_id, tid);
spin_unlock_bh(&ab->base_lock);
return -EINVAL;
}
rx_tid = &peer->rx_tid[tid];
/* Update the tid queue if it is already setup */
if (rx_tid->active) {
paddr = rx_tid->paddr;
ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
ba_win_sz, ssn, true);
spin_unlock_bh(&ab->base_lock);
if (ret) {
ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);
return ret;
}
if (!ab->hw_params->reoq_lut_support) {
ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
peer_mac,
paddr, tid, 1,
ba_win_sz);
if (ret) {
ath12k_warn(ab, "failed to setup peer rx reorder queuefor tid %d: %d\n",
tid, ret);
return ret;
}
}
return 0;
}
rx_tid->tid = tid;
rx_tid->ba_win_sz = ba_win_sz;
/* TODO: Optimize the memory allocation for qos tid based on
* the actual BA window size in REO tid update path.
*/
if (tid == HAL_DESC_REO_NON_QOS_TID)
hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
else
hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
if (!vaddr) {
spin_unlock_bh(&ab->base_lock);
return -ENOMEM;
}
addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
ath12k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
ssn, pn_type);
paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
DMA_BIDIRECTIONAL);
ret = dma_mapping_error(ab->dev, paddr);
if (ret) {
spin_unlock_bh(&ab->base_lock);
goto err_mem_free;
}
rx_tid->vaddr = vaddr;
rx_tid->paddr = paddr;
rx_tid->size = hw_desc_sz;
rx_tid->active = true;
if (ab->hw_params->reoq_lut_support) {
/* Update the REO queue LUT at the corresponding peer id
* and tid with qaddr.
*/
ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
spin_unlock_bh(&ab->base_lock);
} else {
spin_unlock_bh(&ab->base_lock);
ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
paddr, tid, 1, ba_win_sz);
}
return ret;
err_mem_free:
kfree(vaddr);
return ret;
}
int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
struct ieee80211_ampdu_params *params)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_sta *arsta = (void *)params->sta->drv_priv;
int vdev_id = arsta->arvif->vdev_id;
int ret;
ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id,
params->tid, params->buf_size,
params->ssn, arsta->pn_type);
if (ret)
ath12k_warn(ab, "failed to setup rx tid %d\n", ret);
return ret;
}
int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
struct ieee80211_ampdu_params *params)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
struct ath12k_sta *arsta = (void *)params->sta->drv_priv;
int vdev_id = arsta->arvif->vdev_id;
bool active;
int ret;
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find(ab, vdev_id, params->sta->addr);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
return -ENOENT;
}
active = peer->rx_tid[params->tid].active;
if (!active) {
spin_unlock_bh(&ab->base_lock);
return 0;
}
ret = ath12k_peer_rx_tid_reo_update(ar, peer, &peer->rx_tid[params->tid], 1, 0, false);
spin_unlock_bh(&ab->base_lock);
if (ret) {
ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n",
params->tid, ret);
return ret;
}
return ret;
}
int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif,
const u8 *peer_addr,
enum set_key_cmd key_cmd,
struct ieee80211_key_conf *key)
{
struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
struct ath12k_hal_reo_cmd cmd = {0};
struct ath12k_peer *peer;
struct ath12k_dp_rx_tid *rx_tid;
u8 tid;
int ret = 0;
/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
* We use mac80211 PN/TSC replay check functionality for bcast/mcast
* for now.
*/
if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
return 0;
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
cmd.upd0 = HAL_REO_CMD_UPD0_PN |
HAL_REO_CMD_UPD0_PN_SIZE |
HAL_REO_CMD_UPD0_PN_VALID |
HAL_REO_CMD_UPD0_PN_CHECK |
HAL_REO_CMD_UPD0_SVLD;
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
if (key_cmd == SET_KEY) {
cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
cmd.pn_size = 48;
}
break;
default:
break;
}
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n",
peer_addr);
return -ENOENT;
}
for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
rx_tid = &peer->rx_tid[tid];
if (!rx_tid->active)
continue;
cmd.addr_lo = lower_32_bits(rx_tid->paddr);
cmd.addr_hi = upper_32_bits(rx_tid->paddr);
ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
HAL_REO_CMD_UPDATE_RX_QUEUE,
&cmd, NULL);
if (ret) {
ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n",
tid, peer_addr, ret);
break;
}
}
spin_unlock_bh(&ab->base_lock);
return ret;
}
static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
u16 peer_id)
{
int i;
for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
if (ppdu_stats->user_stats[i].is_valid_peer_id) {
if (peer_id == ppdu_stats->user_stats[i].peer_id)
return i;
} else {
return i;
}
}
return -EINVAL;
}
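/* Note: the lookup above returns the slot already bound to peer_id, or else
* the first unused slot (is_valid_peer_id == false); -EINVAL means every
* user slot is already taken by other peers.
*/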
static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
u16 tag, u16 len, const void *ptr,
void *data)
{
const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status;
const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn;
const struct htt_ppdu_stats_user_rate *user_rate;
struct htt_ppdu_stats_info *ppdu_info;
struct htt_ppdu_user_stats *user_stats;
int cur_user;
u16 peer_id;
ppdu_info = data;
switch (tag) {
case HTT_PPDU_STATS_TAG_COMMON:
if (len < sizeof(struct htt_ppdu_stats_common)) {
ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
len, tag);
return -EINVAL;
}
memcpy(&ppdu_info->ppdu_stats.common, ptr,
sizeof(struct htt_ppdu_stats_common));
break;
case HTT_PPDU_STATS_TAG_USR_RATE:
if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
len, tag);
return -EINVAL;
}
user_rate = ptr;
peer_id = le16_to_cpu(user_rate->sw_peer_id);
cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
peer_id);
if (cur_user < 0)
return -EINVAL;
user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
user_stats->peer_id = peer_id;
user_stats->is_valid_peer_id = true;
memcpy(&user_stats->rate, ptr,
sizeof(struct htt_ppdu_stats_user_rate));
user_stats->tlv_flags |= BIT(tag);
break;
case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
len, tag);
return -EINVAL;
}
cmplt_cmn = ptr;
peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id);
cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
peer_id);
if (cur_user < 0)
return -EINVAL;
user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
user_stats->peer_id = peer_id;
user_stats->is_valid_peer_id = true;
memcpy(&user_stats->cmpltn_cmn, ptr,
sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
user_stats->tlv_flags |= BIT(tag);
break;
case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
if (len <
sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
len, tag);
return -EINVAL;
}
ba_status = ptr;
peer_id = le16_to_cpu(ba_status->sw_peer_id);
cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
peer_id);
if (cur_user < 0)
return -EINVAL;
user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
user_stats->peer_id = peer_id;
user_stats->is_valid_peer_id = true;
memcpy(&user_stats->ack_ba, ptr,
sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
user_stats->tlv_flags |= BIT(tag);
break;
}
return 0;
}
static int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
const void *ptr, void *data),
void *data)
{
const struct htt_tlv *tlv;
const void *begin = ptr;
u16 tlv_tag, tlv_len;
int ret = -EINVAL;
while (len > 0) {
if (len < sizeof(*tlv)) {
ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
ptr - begin, len, sizeof(*tlv));
return -EINVAL;
}
tlv = (struct htt_tlv *)ptr;
tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN);
ptr += sizeof(*tlv);
len -= sizeof(*tlv);
if (tlv_len > len) {
ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
tlv_tag, ptr - begin, len, tlv_len);
return -EINVAL;
}
ret = iter(ab, tlv_tag, tlv_len, ptr, data);
if (ret == -ENOMEM)
return ret;
ptr += tlv_len;
len -= tlv_len;
}
return 0;
}
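/* Illustration of the TLV stream walked above: each element is a 4-byte
* header (tag and length packed per HTT_TLV_TAG/HTT_TLV_LEN) followed by
* 'len' payload bytes, laid end to end:
*
*   | hdr | payload... | hdr | payload... | ...
*
* Only -ENOMEM from iter() aborts the walk; other per-TLV errors are
* swallowed so a single malformed TLV does not kill the whole parse.
*/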
static void
ath12k_update_per_peer_tx_stats(struct ath12k *ar,
struct htt_ppdu_stats *ppdu_stats, u8 user)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
struct ieee80211_sta *sta;
struct ath12k_sta *arsta;
struct htt_ppdu_stats_user_rate *user_rate;
struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
struct htt_ppdu_stats_common *common = &ppdu_stats->common;
int ret;
u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
u32 v, succ_bytes = 0;
u16 tones, rate = 0, succ_pkts = 0;
u32 tx_duration = 0;
u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
bool is_ampdu = false;
if (!usr_stats)
return;
if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
return;
if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
is_ampdu =
HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
if (usr_stats->tlv_flags &
BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes);
succ_pkts = le32_get_bits(usr_stats->ack_ba.info,
HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
tid = le32_get_bits(usr_stats->ack_ba.info,
HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);
}
if (common->fes_duration_us)
tx_duration = le32_to_cpu(common->fes_duration_us);
user_rate = &usr_stats->rate;
flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
/* Note: if the host configured fixed rates, and in some other special
* cases, broadcast/management frames are sent at different rates.
* Should the firmware's rate control be skipped for these?
*/
if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
ath12k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
return;
}
if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) {
ath12k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
return;
}
if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) {
ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
mcs, nss);
return;
}
if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
flags,
&rate_idx,
&rate);
if (ret < 0)
return;
}
rcu_read_lock();
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find_by_id(ab, usr_stats->peer_id);
if (!peer || !peer->sta) {
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
return;
}
sta = peer->sta;
arsta = (struct ath12k_sta *)sta->drv_priv;
memset(&arsta->txrate, 0, sizeof(arsta->txrate));
switch (flags) {
case WMI_RATE_PREAMBLE_OFDM:
arsta->txrate.legacy = rate;
break;
case WMI_RATE_PREAMBLE_CCK:
arsta->txrate.legacy = rate;
break;
case WMI_RATE_PREAMBLE_HT:
arsta->txrate.mcs = mcs + 8 * (nss - 1);
arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
if (sgi)
arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
break;
case WMI_RATE_PREAMBLE_VHT:
arsta->txrate.mcs = mcs;
arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
if (sgi)
arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
break;
case WMI_RATE_PREAMBLE_HE:
arsta->txrate.mcs = mcs;
arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
arsta->txrate.he_dcm = dcm;
arsta->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
tones = le16_to_cpu(user_rate->ru_end) -
le16_to_cpu(user_rate->ru_start) + 1;
v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
arsta->txrate.he_ru_alloc = v;
break;
}
arsta->txrate.nss = nss;
arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);
arsta->tx_duration += tx_duration;
memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
* So skip peer stats update for mgmt packets.
*/
if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
memset(peer_stats, 0, sizeof(*peer_stats));
peer_stats->succ_pkts = succ_pkts;
peer_stats->succ_bytes = succ_bytes;
peer_stats->is_ampdu = is_ampdu;
peer_stats->duration = tx_duration;
peer_stats->ba_fails =
HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
}
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
}
static void ath12k_htt_update_ppdu_stats(struct ath12k *ar,
struct htt_ppdu_stats *ppdu_stats)
{
u8 user;
for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
ath12k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}
static
struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k *ar,
u32 ppdu_id)
{
struct htt_ppdu_stats_info *ppdu_info;
lockdep_assert_held(&ar->data_lock);
if (!list_empty(&ar->ppdu_stats_info)) {
list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
if (ppdu_info->ppdu_id == ppdu_id)
return ppdu_info;
}
if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
ppdu_info = list_first_entry(&ar->ppdu_stats_info,
typeof(*ppdu_info), list);
list_del(&ppdu_info->list);
ar->ppdu_stat_list_depth--;
ath12k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
kfree(ppdu_info);
}
}
ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
if (!ppdu_info)
return NULL;
list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
ar->ppdu_stat_list_depth++;
return ppdu_info;
}
static void ath12k_copy_to_delay_stats(struct ath12k_peer *peer,
struct htt_ppdu_user_stats *usr_stats)
{
peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id);
peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0);
peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end);
peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start);
peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1);
peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags);
peer->ppdu_stats_delayba.resp_rate_flags =
le32_to_cpu(usr_stats->rate.resp_rate_flags);
peer->delayba_flag = true;
}
static void ath12k_copy_to_bar(struct ath12k_peer *peer,
struct htt_ppdu_user_stats *usr_stats)
{
usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id);
usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0);
usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end);
usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start);
usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1);
usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags);
usr_stats->rate.resp_rate_flags =
cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags);
peer->delayba_flag = false;
}
static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
struct sk_buff *skb)
{
struct ath12k_htt_ppdu_stats_msg *msg;
struct htt_ppdu_stats_info *ppdu_info;
struct ath12k_peer *peer = NULL;
struct htt_ppdu_user_stats *usr_stats = NULL;
u32 peer_id = 0;
struct ath12k *ar;
int ret, i;
u8 pdev_id;
u32 ppdu_id, len;
msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
ppdu_id = le32_to_cpu(msg->ppdu_id);
rcu_read_lock();
ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
if (!ar) {
ret = -EINVAL;
goto exit;
}
spin_lock_bh(&ar->data_lock);
ppdu_info = ath12k_dp_htt_get_ppdu_desc(ar, ppdu_id);
if (!ppdu_info) {
spin_unlock_bh(&ar->data_lock);
ret = -EINVAL;
goto exit;
}
ppdu_info->ppdu_id = ppdu_id;
ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
ath12k_htt_tlv_ppdu_stats_parse,
(void *)ppdu_info);
if (ret) {
spin_unlock_bh(&ar->data_lock);
ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
goto exit;
}
/* back up data rate tlv for all peers */
if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
(ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
ppdu_info->delay_ba) {
for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) {
peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find_by_id(ab, peer_id);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
continue;
}
usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
if (usr_stats->delay_ba)
ath12k_copy_to_delay_stats(peer, usr_stats);
spin_unlock_bh(&ab->base_lock);
}
}
/* restore all peers' data rate tlv to mu-bar tlv */
if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR &&
(ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) {
for (i = 0; i < ppdu_info->bar_num_users; i++) {
peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find_by_id(ab, peer_id);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
continue;
}
usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
if (peer->delayba_flag)
ath12k_copy_to_bar(peer, usr_stats);
spin_unlock_bh(&ab->base_lock);
}
}
spin_unlock_bh(&ar->data_lock);
exit:
rcu_read_unlock();
return ret;
}
static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
struct sk_buff *skb)
{
struct ath12k_htt_mlo_offset_msg *msg;
struct ath12k_pdev *pdev;
struct ath12k *ar;
u8 pdev_id;
msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);
ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
if (!ar) {
ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
return;
}
spin_lock_bh(&ar->data_lock);
pdev = ar->pdev;
pdev->timestamp.info = __le32_to_cpu(msg->info);
pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us);
pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us);
pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo);
pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi);
pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks);
pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks);
pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);
spin_unlock_bh(&ar->data_lock);
}
void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
struct sk_buff *skb)
{
struct ath12k_dp *dp = &ab->dp;
struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
enum htt_t2h_msg_type type;
u16 peer_id;
u8 vdev_id;
u8 mac_addr[ETH_ALEN];
u16 peer_mac_h16;
u16 ast_hash = 0;
u16 hw_peer_id;
type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE);
ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type: 0x%x\n", type);
switch (type) {
case HTT_T2H_MSG_TYPE_VERSION_CONF:
dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version,
HTT_T2H_VERSION_CONF_MAJOR);
dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version,
HTT_T2H_VERSION_CONF_MINOR);
complete(&dp->htt_tgt_version_received);
break;
/* TODO: remove unused peer map versions after testing */
case HTT_T2H_MSG_TYPE_PEER_MAP:
vdev_id = le32_get_bits(resp->peer_map_ev.info,
HTT_T2H_PEER_MAP_INFO_VDEV_ID);
peer_id = le32_get_bits(resp->peer_map_ev.info,
HTT_T2H_PEER_MAP_INFO_PEER_ID);
peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
peer_mac_h16, mac_addr);
ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
break;
case HTT_T2H_MSG_TYPE_PEER_MAP2:
vdev_id = le32_get_bits(resp->peer_map_ev.info,
HTT_T2H_PEER_MAP_INFO_VDEV_ID);
peer_id = le32_get_bits(resp->peer_map_ev.info,
HTT_T2H_PEER_MAP_INFO_PEER_ID);
peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
peer_mac_h16, mac_addr);
ast_hash = le32_get_bits(resp->peer_map_ev.info2,
HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL);
hw_peer_id = le32_get_bits(resp->peer_map_ev.info1,
HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID);
ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
hw_peer_id);
break;
case HTT_T2H_MSG_TYPE_PEER_MAP3:
vdev_id = le32_get_bits(resp->peer_map_ev.info,
HTT_T2H_PEER_MAP_INFO_VDEV_ID);
peer_id = le32_get_bits(resp->peer_map_ev.info,
HTT_T2H_PEER_MAP_INFO_PEER_ID);
peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
peer_mac_h16, mac_addr);
ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
peer_id);
break;
case HTT_T2H_MSG_TYPE_PEER_UNMAP:
case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
peer_id = le32_get_bits(resp->peer_unmap_ev.info,
HTT_T2H_PEER_UNMAP_INFO_PEER_ID);
ath12k_peer_unmap_event(ab, peer_id);
break;
case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
ath12k_htt_pull_ppdu_stats(ab, skb);
break;
case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
break;
case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
ath12k_htt_mlo_offset_event_handler(ab, skb);
break;
default:
ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n",
type);
break;
}
dev_kfree_skb_any(skb);
}
static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
struct sk_buff_head *msdu_list,
struct sk_buff *first, struct sk_buff *last,
u8 l3pad_bytes, int msdu_len)
{
struct ath12k_base *ab = ar->ab;
struct sk_buff *skb;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
int buf_first_hdr_len, buf_first_len;
struct hal_rx_desc *ldesc;
int space_extra, rem_len, buf_len;
u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
/* As the msdu is spread across multiple rx buffers,
* find the offset to the start of msdu for computing
* the length of the msdu in the first buffer.
*/
buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
skb_put(first, buf_first_hdr_len + msdu_len);
skb_pull(first, buf_first_hdr_len);
return 0;
}
ldesc = (struct hal_rx_desc *)last->data;
rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, ldesc);
rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, ldesc);
/* MSDU spans over multiple buffers because the length of the MSDU
* exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
* in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
*/
skb_put(first, DP_RX_BUFFER_SIZE);
skb_pull(first, buf_first_hdr_len);
/* When an MSDU is spread over multiple buffers, the MSDU_END
* TLVs are valid only in the last buffer. Copy those TLVs.
*/
ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
if (space_extra > 0 &&
(pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
/* Free up all buffers of the MSDU */
while ((skb = __skb_dequeue(msdu_list)) != NULL) {
rxcb = ATH12K_SKB_RXCB(skb);
if (!rxcb->is_continuation) {
dev_kfree_skb_any(skb);
break;
}
dev_kfree_skb_any(skb);
}
return -ENOMEM;
}
rem_len = msdu_len - buf_first_len;
while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
rxcb = ATH12K_SKB_RXCB(skb);
if (rxcb->is_continuation)
buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
else
buf_len = rem_len;
if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
WARN_ON_ONCE(1);
dev_kfree_skb_any(skb);
return -EINVAL;
}
skb_put(skb, buf_len + hal_rx_desc_sz);
skb_pull(skb, hal_rx_desc_sz);
skb_copy_from_linear_data(skb, skb_put(first, buf_len),
buf_len);
dev_kfree_skb_any(skb);
rem_len -= buf_len;
if (!rxcb->is_continuation)
break;
}
return 0;
}
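/* Illustration of the coalesce above for an MSDU spanning three buffers
* (symbolic sizes; the descriptor size comes from hw_params->hal_desc_sz):
*
*   buf0 ('first'): [rx_desc][l3 pad][msdu bytes..................]
*   buf1:           [rx_desc][msdu bytes.........................]  (cont.)
*   buf2 ('last'):  [rx_desc][msdu bytes....]
*
* The payloads of buf1/buf2 are copied into 'first' (after expanding its
* tailroom if needed) and the source skbs are freed.
*/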
static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
struct sk_buff *first)
{
struct sk_buff *skb;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
if (!rxcb->is_continuation)
return first;
skb_queue_walk(msdu_list, skb) {
rxcb = ATH12K_SKB_RXCB(skb);
if (!rxcb->is_continuation)
return skb;
}
return NULL;
}
static void ath12k_dp_rx_h_csum_offload(struct ath12k *ar, struct sk_buff *msdu)
{
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
struct ath12k_base *ab = ar->ab;
bool ip_csum_fail, l4_csum_fail;
ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rxcb->rx_desc);
l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rxcb->rx_desc);
msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
}
static int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar,
enum hal_encrypt_type enctype)
{
switch (enctype) {
case HAL_ENCRYPT_TYPE_OPEN:
case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
case HAL_ENCRYPT_TYPE_TKIP_MIC:
return 0;
case HAL_ENCRYPT_TYPE_CCMP_128:
return IEEE80211_CCMP_MIC_LEN;
case HAL_ENCRYPT_TYPE_CCMP_256:
return IEEE80211_CCMP_256_MIC_LEN;
case HAL_ENCRYPT_TYPE_GCMP_128:
case HAL_ENCRYPT_TYPE_AES_GCMP_256:
return IEEE80211_GCMP_MIC_LEN;
case HAL_ENCRYPT_TYPE_WEP_40:
case HAL_ENCRYPT_TYPE_WEP_104:
case HAL_ENCRYPT_TYPE_WEP_128:
case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
case HAL_ENCRYPT_TYPE_WAPI:
break;
}
ath12k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
return 0;
}
static int ath12k_dp_rx_crypto_param_len(struct ath12k *ar,
enum hal_encrypt_type enctype)
{
switch (enctype) {
case HAL_ENCRYPT_TYPE_OPEN:
return 0;
case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
case HAL_ENCRYPT_TYPE_TKIP_MIC:
return IEEE80211_TKIP_IV_LEN;
case HAL_ENCRYPT_TYPE_CCMP_128:
return IEEE80211_CCMP_HDR_LEN;
case HAL_ENCRYPT_TYPE_CCMP_256:
return IEEE80211_CCMP_256_HDR_LEN;
case HAL_ENCRYPT_TYPE_GCMP_128:
case HAL_ENCRYPT_TYPE_AES_GCMP_256:
return IEEE80211_GCMP_HDR_LEN;
case HAL_ENCRYPT_TYPE_WEP_40:
case HAL_ENCRYPT_TYPE_WEP_104:
case HAL_ENCRYPT_TYPE_WEP_128:
case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
case HAL_ENCRYPT_TYPE_WAPI:
break;
}
ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
return 0;
}
static int ath12k_dp_rx_crypto_icv_len(struct ath12k *ar,
enum hal_encrypt_type enctype)
{
switch (enctype) {
case HAL_ENCRYPT_TYPE_OPEN:
case HAL_ENCRYPT_TYPE_CCMP_128:
case HAL_ENCRYPT_TYPE_CCMP_256:
case HAL_ENCRYPT_TYPE_GCMP_128:
case HAL_ENCRYPT_TYPE_AES_GCMP_256:
return 0;
case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
case HAL_ENCRYPT_TYPE_TKIP_MIC:
return IEEE80211_TKIP_ICV_LEN;
case HAL_ENCRYPT_TYPE_WEP_40:
case HAL_ENCRYPT_TYPE_WEP_104:
case HAL_ENCRYPT_TYPE_WEP_128:
case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
case HAL_ENCRYPT_TYPE_WAPI:
break;
}
ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
return 0;
}
static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar,
struct sk_buff *msdu,
enum hal_encrypt_type enctype,
struct ieee80211_rx_status *status)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
struct ieee80211_hdr *hdr;
size_t hdr_len;
u8 *crypto_hdr;
u16 qos_ctl;
/* pull decapped header */
hdr = (struct ieee80211_hdr *)msdu->data;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
skb_pull(msdu, hdr_len);
/* Rebuild qos header */
hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
/* Reset the order bit as the HT_Control header is stripped */
hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
qos_ctl = rxcb->tid;
if (ath12k_dp_rx_h_mesh_ctl_present(ab, rxcb->rx_desc))
qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
/* TODO: Add other QoS ctl fields when required */
/* copy decap header before overwriting for reuse below */
memcpy(decap_hdr, hdr, hdr_len);
/* Rebuild crypto header for mac80211 use */
if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
crypto_hdr = skb_push(msdu, ath12k_dp_rx_crypto_param_len(ar, enctype));
ath12k_dp_rx_desc_get_crypto_header(ar->ab,
rxcb->rx_desc, crypto_hdr,
enctype);
}
memcpy(skb_push(msdu,
IEEE80211_QOS_CTL_LEN), &qos_ctl,
IEEE80211_QOS_CTL_LEN);
memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
}
static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu,
enum hal_encrypt_type enctype,
struct ieee80211_rx_status *status,
bool decrypted)
{
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
struct ieee80211_hdr *hdr;
size_t hdr_len;
size_t crypto_len;
if (!rxcb->is_first_msdu ||
!(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
WARN_ON_ONCE(1);
return;
}
skb_trim(msdu, msdu->len - FCS_LEN);
if (!decrypted)
return;
hdr = (void *)msdu->data;
/* Tail */
if (status->flag & RX_FLAG_IV_STRIPPED) {
skb_trim(msdu, msdu->len -
ath12k_dp_rx_crypto_mic_len(ar, enctype));
skb_trim(msdu, msdu->len -
ath12k_dp_rx_crypto_icv_len(ar, enctype));
} else {
/* MIC */
if (status->flag & RX_FLAG_MIC_STRIPPED)
skb_trim(msdu, msdu->len -
ath12k_dp_rx_crypto_mic_len(ar, enctype));
/* ICV */
if (status->flag & RX_FLAG_ICV_STRIPPED)
skb_trim(msdu, msdu->len -
ath12k_dp_rx_crypto_icv_len(ar, enctype));
}
/* MMIC */
if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
!ieee80211_has_morefrags(hdr->frame_control) &&
enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
/* Head */
if (status->flag & RX_FLAG_IV_STRIPPED) {
hdr_len = ieee80211_hdrlen(hdr->frame_control);
crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
memmove(msdu->data + crypto_len, msdu->data, hdr_len);
skb_pull(msdu, crypto_len);
}
}
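/* Rebuild a full 802.11 header in front of an ethernet-decapped MSDU using
 * the frame control, addresses and (for QoS data) TID recovered from the rx
 * descriptor. The crypto header is pushed first, so it ends up between the
 * rebuilt 802.11 header and the payload when the IV was not stripped by HW.
 */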
static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar,
struct sk_buff *msdu,
struct ath12k_skb_rxcb *rxcb,
struct ieee80211_rx_status *status,
enum hal_encrypt_type enctype)
{
struct hal_rx_desc *rx_desc = rxcb->rx_desc;
struct ath12k_base *ab = ar->ab;
size_t hdr_len, crypto_len;
struct ieee80211_hdr *hdr;
u16 qos_ctl;
__le16 fc;
u8 *crypto_hdr;
if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
crypto_hdr = skb_push(msdu, crypto_len);
ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype);
}
fc = cpu_to_le16(ath12k_dp_rxdesc_get_mpdu_frame_ctrl(ab, rx_desc));
hdr_len = ieee80211_hdrlen(fc);
skb_push(msdu, hdr_len);
hdr = (struct ieee80211_hdr *)msdu->data;
hdr->frame_control = fc;
/* Get wifi header from rx_desc */
ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, hdr);
if (rxcb->is_mcbc)
status->flag &= ~RX_FLAG_PN_VALIDATED;
/* Add QOS header */
if (ieee80211_is_data_qos(hdr->frame_control)) {
qos_ctl = rxcb->tid;
if (ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc))
qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
/* TODO: Add other QoS ctl fields when required */
memcpy(msdu->data + (hdr_len - IEEE80211_QOS_CTL_LEN),
&qos_ctl, IEEE80211_QOS_CTL_LEN);
}
}
static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar,
struct sk_buff *msdu,
enum hal_encrypt_type enctype,
struct ieee80211_rx_status *status)
{
struct ieee80211_hdr *hdr;
struct ethhdr *eth;
u8 da[ETH_ALEN];
u8 sa[ETH_ALEN];
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}};
eth = (struct ethhdr *)msdu->data;
ether_addr_copy(da, eth->h_dest);
ether_addr_copy(sa, eth->h_source);
rfc.snap_type = eth->h_proto;
skb_pull(msdu, sizeof(*eth));
memcpy(skb_push(msdu, sizeof(rfc)), &rfc,
sizeof(rfc));
ath12k_get_dot11_hdr_from_rx_desc(ar, msdu, rxcb, status, enctype);
/* The original 802.11 header has a different DA; in the 4-address
 * case it may also have a different SA.
 */
hdr = (struct ieee80211_hdr *)msdu->data;
ether_addr_copy(ieee80211_get_DA(hdr), da);
ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu,
struct hal_rx_desc *rx_desc,
enum hal_encrypt_type enctype,
struct ieee80211_rx_status *status,
bool decrypted)
{
struct ath12k_base *ab = ar->ab;
u8 decap;
struct ethhdr *ehdr;
decap = ath12k_dp_rx_h_decap_type(ab, rx_desc);
switch (decap) {
case DP_RX_DECAP_TYPE_NATIVE_WIFI:
ath12k_dp_rx_h_undecap_nwifi(ar, msdu, enctype, status);
break;
case DP_RX_DECAP_TYPE_RAW:
ath12k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
decrypted);
break;
case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
ehdr = (struct ethhdr *)msdu->data;
/* mac80211 allows fast path only for authorized STA */
if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
ATH12K_SKB_RXCB(msdu)->is_eapol = true;
ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
break;
}
/* PN for mcast packets will be validated in mac80211;
* remove eth header and add 802.11 header.
*/
if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted)
ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
break;
case DP_RX_DECAP_TYPE_8023:
/* TODO: Handle undecap for these formats */
break;
}
}
struct ath12k_peer *
ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu)
{
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
struct hal_rx_desc *rx_desc = rxcb->rx_desc;
struct ath12k_peer *peer = NULL;
lockdep_assert_held(&ab->base_lock);
if (rxcb->peer_id)
peer = ath12k_peer_find_by_id(ab, rxcb->peer_id);
if (peer)
return peer;
if (!rx_desc || !(ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
return NULL;
peer = ath12k_peer_find_by_addr(ab,
ath12k_dp_rxdesc_get_mpdu_start_addr2(ab,
rx_desc));
return peer;
}
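/* Per-MPDU rx handling: resolve the peer to determine the cipher, translate
 * the HW error bitmap and decryption result into mac80211 rx flags, set the
 * checksum offload state and undecap the frame. For multicast/broadcast
 * frames the crypto header is kept so that mac80211 can validate the PN.
 */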
static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
struct sk_buff *msdu,
struct hal_rx_desc *rx_desc,
struct ieee80211_rx_status *rx_status)
{
bool fill_crypto_hdr;
struct ath12k_base *ab = ar->ab;
struct ath12k_skb_rxcb *rxcb;
enum hal_encrypt_type enctype;
bool is_decrypted = false;
struct ieee80211_hdr *hdr;
struct ath12k_peer *peer;
u32 err_bitmap;
/* PN for multicast packets will be checked in mac80211 */
rxcb = ATH12K_SKB_RXCB(msdu);
fill_crypto_hdr = ath12k_dp_rx_h_is_da_mcbc(ar->ab, rx_desc);
rxcb->is_mcbc = fill_crypto_hdr;
if (rxcb->is_mcbc)
rxcb->peer_id = ath12k_dp_rx_h_peer_id(ar->ab, rx_desc);
spin_lock_bh(&ar->ab->base_lock);
peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
if (peer) {
if (rxcb->is_mcbc)
enctype = peer->sec_type_grp;
else
enctype = peer->sec_type;
} else {
enctype = HAL_ENCRYPT_TYPE_OPEN;
}
spin_unlock_bh(&ar->ab->base_lock);
err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, rx_desc);
/* Clear per-MPDU flags while leaving per-PPDU flags intact */
rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
RX_FLAG_MMIC_ERROR |
RX_FLAG_DECRYPTED |
RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED);
if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC)
rx_status->flag |= RX_FLAG_MMIC_ERROR;
if (is_decrypted) {
rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
if (fill_crypto_hdr)
rx_status->flag |= RX_FLAG_MIC_STRIPPED |
RX_FLAG_ICV_STRIPPED;
else
rx_status->flag |= RX_FLAG_IV_STRIPPED |
RX_FLAG_PN_VALIDATED;
}
ath12k_dp_rx_h_csum_offload(ar, msdu);
ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
enctype, rx_status, is_decrypted);
if (!is_decrypted || fill_crypto_hdr)
return;
if (ath12k_dp_rx_h_decap_type(ar->ab, rx_desc) !=
DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
hdr = (void *)msdu->data;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
}
}
static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc,
struct ieee80211_rx_status *rx_status)
{
struct ath12k_base *ab = ar->ab;
struct ieee80211_supported_band *sband;
enum rx_msdu_start_pkt_type pkt_type;
u8 bw;
u8 rate_mcs, nss;
u8 sgi;
bool is_cck;
pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
nss = ath12k_dp_rx_h_nss(ab, rx_desc);
sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);
switch (pkt_type) {
case RX_MSDU_START_PKT_TYPE_11A:
case RX_MSDU_START_PKT_TYPE_11B:
is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
sband = &ar->mac.sbands[rx_status->band];
rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
is_cck);
break;
case RX_MSDU_START_PKT_TYPE_11N:
rx_status->encoding = RX_ENC_HT;
if (rate_mcs > ATH12K_HT_MCS_MAX) {
ath12k_warn(ar->ab,
"Received with invalid mcs in HT mode %d\n",
rate_mcs);
break;
}
rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
if (sgi)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
break;
case RX_MSDU_START_PKT_TYPE_11AC:
rx_status->encoding = RX_ENC_VHT;
rx_status->rate_idx = rate_mcs;
if (rate_mcs > ATH12K_VHT_MCS_MAX) {
ath12k_warn(ar->ab,
"Received with invalid mcs in VHT mode %d\n",
rate_mcs);
break;
}
rx_status->nss = nss;
if (sgi)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
break;
case RX_MSDU_START_PKT_TYPE_11AX:
rx_status->rate_idx = rate_mcs;
if (rate_mcs > ATH12K_HE_MCS_MAX) {
ath12k_warn(ar->ab,
"Received with invalid mcs in HE mode %d\n",
rate_mcs);
break;
}
rx_status->encoding = RX_ENC_HE;
rx_status->nss = nss;
rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
break;
}
}
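/* Fill the per-PPDU fields of the rx status. Note: ath12k_dp_rx_h_freq() is
 * assumed to pack the channel number into the low 16 bits of its return
 * value and the center frequency into the high 16 bits, which is how the
 * meta data word is split below. Frames on an unrecognized channel fall back
 * to the radio's current rx channel.
 */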
void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
struct ieee80211_rx_status *rx_status)
{
struct ath12k_base *ab = ar->ab;
u8 channel_num;
u32 center_freq, meta_data;
struct ieee80211_channel *channel;
rx_status->freq = 0;
rx_status->rate_idx = 0;
rx_status->nss = 0;
rx_status->encoding = RX_ENC_LEGACY;
rx_status->bw = RATE_INFO_BW_20;
rx_status->enc_flags = 0;
rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
channel_num = meta_data;
center_freq = meta_data >> 16;
if (center_freq >= 5935 && center_freq <= 7105) {
rx_status->band = NL80211_BAND_6GHZ;
} else if (channel_num >= 1 && channel_num <= 14) {
rx_status->band = NL80211_BAND_2GHZ;
} else if (channel_num >= 36 && channel_num <= 173) {
rx_status->band = NL80211_BAND_5GHZ;
} else {
spin_lock_bh(&ar->data_lock);
channel = ar->rx_channel;
if (channel) {
rx_status->band = channel->band;
channel_num =
ieee80211_frequency_to_channel(channel->center_freq);
}
spin_unlock_bh(&ar->data_lock);
ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
rx_desc, sizeof(*rx_desc));
}
rx_status->freq = ieee80211_channel_to_frequency(channel_num,
rx_status->band);
ath12k_dp_rx_h_rate(ar, rx_desc, rx_status);
}
static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
struct sk_buff *msdu,
struct ieee80211_rx_status *status)
{
struct ath12k_base *ab = ar->ab;
static const struct ieee80211_radiotap_he known = {
.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
};
struct ieee80211_radiotap_he *he;
struct ieee80211_rx_status *rx_status;
struct ieee80211_sta *pubsta;
struct ath12k_peer *peer;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
u8 decap = DP_RX_DECAP_TYPE_RAW;
bool is_mcbc = rxcb->is_mcbc;
bool is_eapol = rxcb->is_eapol;
if (status->encoding == RX_ENC_HE && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
!(status->flag & RX_FLAG_SKIP_MONITOR)) {
he = skb_push(msdu, sizeof(known));
memcpy(he, &known, sizeof(known));
status->flag |= RX_FLAG_RADIOTAP_HE;
}
if (!(status->flag & RX_FLAG_ONLY_MONITOR))
decap = ath12k_dp_rx_h_decap_type(ab, rxcb->rx_desc);
spin_lock_bh(&ab->base_lock);
peer = ath12k_dp_rx_h_find_peer(ab, msdu);
pubsta = peer ? peer->sta : NULL;
spin_unlock_bh(&ab->base_lock);
ath12k_dbg(ab, ATH12K_DBG_DATA,
"rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
peer ? peer->addr : NULL,
rxcb->tid,
is_mcbc ? "mcast" : "ucast",
ath12k_dp_rx_h_seq_no(ab, rxcb->rx_desc),
(status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
(status->encoding == RX_ENC_HT) ? "ht" : "",
(status->encoding == RX_ENC_VHT) ? "vht" : "",
(status->encoding == RX_ENC_HE) ? "he" : "",
(status->bw == RATE_INFO_BW_40) ? "40" : "",
(status->bw == RATE_INFO_BW_80) ? "80" : "",
(status->bw == RATE_INFO_BW_160) ? "160" : "",
status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
status->rate_idx,
status->nss,
status->freq,
status->band, status->flag,
!!(status->flag & RX_FLAG_FAILED_FCS_CRC),
!!(status->flag & RX_FLAG_MMIC_ERROR),
!!(status->flag & RX_FLAG_AMSDU_MORE));
ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
msdu->data, msdu->len);
rx_status = IEEE80211_SKB_RXCB(msdu);
*rx_status = *status;
/* TODO: trace rx packet */
/* The PN for multicast packets is not validated in HW, so skip the
 * 802.3 rx path. Also, fast_rx expects the STA to be authorized,
 * hence EAPOL packets are sent via the slow path.
 */
if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
!(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
rx_status->flag |= RX_FLAG_8023;
ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
}
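/* Prepare a single reaped MSDU for delivery: locate the buffer holding the
 * MSDU_END TLV, verify the msdu_done bit, strip the HW descriptor and L3
 * padding (coalescing multi-buffer MSDUs when needed), then run the PPDU and
 * MPDU handlers to populate the rx status.
 */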
static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
struct sk_buff *msdu,
struct sk_buff_head *msdu_list,
struct ieee80211_rx_status *rx_status)
{
struct ath12k_base *ab = ar->ab;
struct hal_rx_desc *rx_desc, *lrx_desc;
struct ath12k_skb_rxcb *rxcb;
struct sk_buff *last_buf;
u8 l3_pad_bytes;
u16 msdu_len;
int ret;
u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
if (!last_buf) {
ath12k_warn(ab,
"No valid Rx buffer to access MSDU_END tlv\n");
ret = -EIO;
goto free_out;
}
rx_desc = (struct hal_rx_desc *)msdu->data;
lrx_desc = (struct hal_rx_desc *)last_buf->data;
if (!ath12k_dp_rx_h_msdu_done(ab, lrx_desc)) {
ath12k_warn(ab, "msdu_done bit in msdu_end is not set\n");
ret = -EIO;
goto free_out;
}
rxcb = ATH12K_SKB_RXCB(msdu);
rxcb->rx_desc = rx_desc;
msdu_len = ath12k_dp_rx_h_msdu_len(ab, lrx_desc);
l3_pad_bytes = ath12k_dp_rx_h_l3pad(ab, lrx_desc);
if (rxcb->is_frag) {
skb_pull(msdu, hal_rx_desc_sz);
} else if (!rxcb->is_continuation) {
if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
ret = -EINVAL;
ath12k_warn(ab, "invalid msdu len %u\n", msdu_len);
ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
sizeof(*rx_desc));
goto free_out;
}
skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
} else {
ret = ath12k_dp_rx_msdu_coalesce(ar, msdu_list,
msdu, last_buf,
l3_pad_bytes, msdu_len);
if (ret) {
ath12k_warn(ab,
"failed to coalesce msdu rx buffer%d\n", ret);
goto free_out;
}
}
ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
return 0;
free_out:
return ret;
}
static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
struct napi_struct *napi,
struct sk_buff_head *msdu_list,
int ring_id)
{
struct ieee80211_rx_status rx_status = {0};
struct ath12k_skb_rxcb *rxcb;
struct sk_buff *msdu;
struct ath12k *ar;
u8 mac_id, pdev_id;
int ret;
if (skb_queue_empty(msdu_list))
return;
rcu_read_lock();
while ((msdu = __skb_dequeue(msdu_list))) {
rxcb = ATH12K_SKB_RXCB(msdu);
mac_id = rxcb->mac_id;
pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
ar = ab->pdevs[pdev_id].ar;
if (!rcu_dereference(ab->pdevs_active[pdev_id])) {
dev_kfree_skb_any(msdu);
continue;
}
if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
dev_kfree_skb_any(msdu);
continue;
}
ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
if (ret) {
ath12k_dbg(ab, ATH12K_DBG_DATA,
"Unable to process msdu %d", ret);
dev_kfree_skb_any(msdu);
continue;
}
ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
}
rcu_read_unlock();
}
int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
struct napi_struct *napi, int budget)
{
struct ath12k_rx_desc_info *desc_info;
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
struct hal_reo_dest_ring *desc;
int num_buffs_reaped = 0;
struct sk_buff_head msdu_list;
struct ath12k_skb_rxcb *rxcb;
int total_msdu_reaped = 0;
struct hal_srng *srng;
struct sk_buff *msdu;
bool done = false;
int mac_id;
u64 desc_va;
__skb_queue_head_init(&msdu_list);
srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
spin_lock_bh(&srng->lock);
try_again:
ath12k_hal_srng_access_begin(ab, srng);
while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
enum hal_reo_dest_ring_push_reason push_reason;
u32 cookie;
cookie = le32_get_bits(desc->buf_addr_info.info1,
BUFFER_ADDR_INFO1_SW_COOKIE);
mac_id = le32_get_bits(desc->info0,
HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
le32_to_cpu(desc->buf_va_lo));
desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
/* retry manual desc retrieval */
if (!desc_info) {
desc_info = ath12k_dp_get_rx_desc(ab, cookie);
if (!desc_info) {
ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
continue;
}
}
if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
ath12k_warn(ab, "Check HW CC implementation");
msdu = desc_info->skb;
desc_info->skb = NULL;
spin_lock_bh(&dp->rx_desc_lock);
list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
spin_unlock_bh(&dp->rx_desc_lock);
rxcb = ATH12K_SKB_RXCB(msdu);
dma_unmap_single(ab->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
num_buffs_reaped++;
push_reason = le32_get_bits(desc->info0,
HAL_REO_DEST_RING_INFO0_PUSH_REASON);
if (push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
dev_kfree_skb_any(msdu);
ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
continue;
}
rxcb->is_first_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
rxcb->is_last_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
rxcb->is_continuation = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
rxcb->mac_id = mac_id;
rxcb->peer_id = le32_get_bits(desc->rx_mpdu_info.peer_meta_data,
RX_MPDU_DESC_META_DATA_PEER_ID);
rxcb->tid = le32_get_bits(desc->rx_mpdu_info.info0,
RX_MPDU_DESC_INFO0_TID);
__skb_queue_tail(&msdu_list, msdu);
if (!rxcb->is_continuation) {
total_msdu_reaped++;
done = true;
} else {
done = false;
}
if (total_msdu_reaped >= budget)
break;
}
/* Hw might have updated the head pointer after we cached it.
* In this case, even though there are entries in the ring we'll
* get rx_desc NULL. Give the read another try with updated cached
* head pointer so that we can reap complete MPDU in the current
* rx processing.
*/
if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) {
ath12k_hal_srng_access_end(ab, srng);
goto try_again;
}
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
if (!total_msdu_reaped)
goto exit;
/* TODO: Move to implicit BM? */
ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped,
ab->hw_params->hal_params->rx_buf_rbm, true);
ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
ring_id);
exit:
return total_msdu_reaped;
}
static void ath12k_dp_rx_frag_timer(struct timer_list *timer)
{
struct ath12k_dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
spin_lock_bh(&rx_tid->ab->base_lock);
if (rx_tid->last_frag_no &&
rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
spin_unlock_bh(&rx_tid->ab->base_lock);
return;
}
ath12k_dp_rx_frags_cleanup(rx_tid, true);
spin_unlock_bh(&rx_tid->ab->base_lock);
}
int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id)
{
struct ath12k_base *ab = ar->ab;
struct crypto_shash *tfm;
struct ath12k_peer *peer;
struct ath12k_dp_rx_tid *rx_tid;
int i;
tfm = crypto_alloc_shash("michael_mic", 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find(ab, vdev_id, peer_mac);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
return -ENOENT;
}
for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
rx_tid = &peer->rx_tid[i];
rx_tid->ab = ab;
timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0);
skb_queue_head_init(&rx_tid->rx_frags);
}
peer->tfm_mmic = tfm;
spin_unlock_bh(&ab->base_lock);
return 0;
}
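/* Compute the TKIP Michael MIC over the MSDU payload. The 16-byte MIC
 * header fed to the hash is DA (6) | SA (6) | priority (1) | 3 zero pad
 * bytes, and the key is the 8-byte rx MIC key, matching the layout expected
 * by the "michael_mic" shash.
 */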
static int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
struct ieee80211_hdr *hdr, u8 *data,
size_t data_len, u8 *mic)
{
SHASH_DESC_ON_STACK(desc, tfm);
u8 mic_hdr[16] = {0};
u8 tid = 0;
int ret;
if (!tfm)
return -EINVAL;
desc->tfm = tfm;
ret = crypto_shash_setkey(tfm, key, 8);
if (ret)
goto out;
ret = crypto_shash_init(desc);
if (ret)
goto out;
/* TKIP MIC header */
memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
if (ieee80211_is_data_qos(hdr->frame_control))
tid = ieee80211_get_tid(hdr);
mic_hdr[12] = tid;
ret = crypto_shash_update(desc, mic_hdr, 16);
if (ret)
goto out;
ret = crypto_shash_update(desc, data, data_len);
if (ret)
goto out;
ret = crypto_shash_final(desc, mic);
out:
shash_desc_zero(desc);
return ret;
}
static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer *peer,
struct sk_buff *msdu)
{
struct ath12k_base *ab = ar->ab;
struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
struct ieee80211_key_conf *key_conf;
struct ieee80211_hdr *hdr;
u8 mic[IEEE80211_CCMP_MIC_LEN];
int head_len, tail_len, ret;
size_t data_len;
u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
u8 *key, *data;
u8 key_idx;
if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
return 0;
hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
hdr_len = ieee80211_hdrlen(hdr->frame_control);
head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
if (!is_multicast_ether_addr(hdr->addr1))
key_idx = peer->ucast_keyidx;
else
key_idx = peer->mcast_keyidx;
key_conf = peer->keys[key_idx];
data = msdu->data + head_len;
data_len = msdu->len - head_len - tail_len;
key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
goto mic_fail;
return 0;
mic_fail:
(ATH12K_SKB_RXCB(msdu))->is_first_msdu = true;
(ATH12K_SKB_RXCB(msdu))->is_last_msdu = true;
rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
skb_pull(msdu, hal_rx_desc_sz);
ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
ieee80211_rx(ar->hw, msdu);
return -EINVAL;
}
static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu,
enum hal_encrypt_type enctype, u32 flags)
{
struct ieee80211_hdr *hdr;
size_t hdr_len;
size_t crypto_len;
u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
if (!flags)
return;
hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
if (flags & RX_FLAG_MIC_STRIPPED)
skb_trim(msdu, msdu->len -
ath12k_dp_rx_crypto_mic_len(ar, enctype));
if (flags & RX_FLAG_ICV_STRIPPED)
skb_trim(msdu, msdu->len -
ath12k_dp_rx_crypto_icv_len(ar, enctype));
if (flags & RX_FLAG_IV_STRIPPED) {
hdr_len = ieee80211_hdrlen(hdr->frame_control);
crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
memmove(msdu->data + hal_rx_desc_sz + crypto_len,
msdu->data + hal_rx_desc_sz, hdr_len);
skb_pull(msdu, crypto_len);
}
}
static int ath12k_dp_rx_h_defrag(struct ath12k *ar,
struct ath12k_peer *peer,
struct ath12k_dp_rx_tid *rx_tid,
struct sk_buff **defrag_skb)
{
struct ath12k_base *ab = ar->ab;
struct hal_rx_desc *rx_desc;
struct sk_buff *skb, *first_frag, *last_frag;
struct ieee80211_hdr *hdr;
enum hal_encrypt_type enctype;
bool is_decrypted = false;
int msdu_len = 0;
int extra_space;
u32 flags, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
first_frag = skb_peek(&rx_tid->rx_frags);
last_frag = skb_peek_tail(&rx_tid->rx_frags);
skb_queue_walk(&rx_tid->rx_frags, skb) {
flags = 0;
rx_desc = (struct hal_rx_desc *)skb->data;
hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
enctype = ath12k_dp_rx_h_enctype(ab, rx_desc);
if (enctype != HAL_ENCRYPT_TYPE_OPEN)
is_decrypted = ath12k_dp_rx_h_is_decrypted(ab,
rx_desc);
if (is_decrypted) {
if (skb != first_frag)
flags |= RX_FLAG_IV_STRIPPED;
if (skb != last_frag)
flags |= RX_FLAG_ICV_STRIPPED |
RX_FLAG_MIC_STRIPPED;
}
/* RX fragments are always raw packets */
if (skb != last_frag)
skb_trim(skb, skb->len - FCS_LEN);
ath12k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
if (skb != first_frag)
skb_pull(skb, hal_rx_desc_sz +
ieee80211_hdrlen(hdr->frame_control));
msdu_len += skb->len;
}
extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
if (extra_space > 0 &&
(pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
return -ENOMEM;
__skb_unlink(first_frag, &rx_tid->rx_frags);
while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
skb_put_data(first_frag, skb->data, skb->len);
dev_kfree_skb_any(skb);
}
hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
ATH12K_SKB_RXCB(first_frag)->is_frag = 1;
if (ath12k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
first_frag = NULL;
*defrag_skb = first_frag;
return 0;
}
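/* Reinject a defragmented MPDU back into HW via the REO entrance ring:
 * reuse the link descriptor saved from the first fragment, rewrite its first
 * MSDU entry to describe the rebuilt frame, map the new buffer and queue an
 * entrance ring descriptor marked with a valid PN so REO forwards it as a
 * regular MPDU.
 */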
static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
struct ath12k_dp_rx_tid *rx_tid,
struct sk_buff *defrag_skb)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = &ab->dp;
struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
struct hal_reo_entrance_ring *reo_ent_ring;
struct hal_reo_dest_ring *reo_dest_ring;
struct dp_link_desc_bank *link_desc_banks;
struct hal_rx_msdu_link *msdu_link;
struct hal_rx_msdu_details *msdu0;
struct hal_srng *srng;
dma_addr_t link_paddr, buf_paddr;
u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info;
u32 cookie, hal_rx_desc_sz, dest_ring_info0;
int ret;
struct ath12k_rx_desc_info *desc_info;
u8 dst_ind;
hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
link_desc_banks = dp->link_desc_banks;
reo_dest_ring = rx_tid->dst_ring_desc;
ath12k_hal_rx_reo_ent_paddr_get(ab, &reo_dest_ring->buf_addr_info,
&link_paddr, &cookie);
desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);
msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
(link_paddr - link_desc_banks[desc_bank].paddr));
msdu0 = &msdu_link->msdu_link[0];
msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0);
dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND);
memset(msdu0, 0, sizeof(*msdu0));
msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) |
u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) |
u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) |
u32_encode_bits(defrag_skb->len - hal_rx_desc_sz,
RX_MSDU_DESC_INFO0_MSDU_LENGTH) |
u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_SA) |
u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA);
msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info);
msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info);
/* change msdu len in hal rx desc */
ath12k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
buf_paddr = dma_map_single(ab->dev, defrag_skb->data,
defrag_skb->len + skb_tailroom(defrag_skb),
DMA_FROM_DEVICE);
if (dma_mapping_error(ab->dev, buf_paddr))
return -ENOMEM;
spin_lock_bh(&dp->rx_desc_lock);
desc_info = list_first_entry_or_null(&dp->rx_desc_free_list,
struct ath12k_rx_desc_info,
list);
if (!desc_info) {
spin_unlock_bh(&dp->rx_desc_lock);
ath12k_warn(ab, "failed to find rx desc for reinject\n");
ret = -ENOMEM;
goto err_unmap_dma;
}
desc_info->skb = defrag_skb;
list_del(&desc_info->list);
list_add_tail(&desc_info->list, &dp->rx_desc_used_list);
spin_unlock_bh(&dp->rx_desc_lock);
ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr;
ath12k_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr,
desc_info->cookie,
HAL_RX_BUF_RBM_SW3_BM);
/* Fill mpdu details into reo entrance ring */
srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id];
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng);
if (!reo_ent_ring) {
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
ret = -ENOSPC;
goto err_free_desc;
}
memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
cookie,
HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST);
mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) |
u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) |
u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID);
reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info);
reo_ent_ring->rx_mpdu_info.peer_meta_data =
reo_dest_ring->rx_mpdu_info.peer_meta_data;
/* Firmware expects the physical address to be filled in queue_addr_lo
 * in the MLO scenario; in the non-MLO case the peer meta data needs to
 * be filled instead.
 * TODO: Handle the MLO scenario.
 */
reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data;
reo_ent_ring->info0 = le32_encode_bits(dst_ind,
HAL_REO_ENTR_RING_INFO0_DEST_IND);
reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn,
HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM);
dest_ring_info0 = le32_get_bits(reo_dest_ring->info0,
HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
reo_ent_ring->info2 =
cpu_to_le32(u32_get_bits(dest_ring_info0,
HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID));
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return 0;
err_free_desc:
spin_lock_bh(&dp->rx_desc_lock);
list_del(&desc_info->list);
list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
desc_info->skb = NULL;
spin_unlock_bh(&dp->rx_desc_lock);
err_unmap_dma:
dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
DMA_FROM_DEVICE);
return ret;
}
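/* Order fragments by the fragment number taken from the rx descriptor; used
 * to keep the per-TID fragment queue sorted on insertion.
 */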
static int ath12k_dp_rx_h_cmp_frags(struct ath12k_base *ab,
struct sk_buff *a, struct sk_buff *b)
{
int frag1, frag2;
frag1 = ath12k_dp_rx_h_frag_no(ab, a);
frag2 = ath12k_dp_rx_h_frag_no(ab, b);
return frag1 - frag2;
}
static void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab,
struct sk_buff_head *frag_list,
struct sk_buff *cur_frag)
{
struct sk_buff *skb;
int cmp;
skb_queue_walk(frag_list, skb) {
cmp = ath12k_dp_rx_h_cmp_frags(ab, skb, cur_frag);
if (cmp < 0)
continue;
__skb_queue_before(frag_list, skb, cur_frag);
return;
}
__skb_queue_tail(frag_list, cur_frag);
}
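/* Extract the 48-bit packet number from the CCMP/GCMP header that follows
 * the 802.11 header. The byte layout assumed here is the standard one,
 * PN0 PN1 <key id octets> PN2 PN3 PN4 PN5, hence bytes 0, 1 and 4..7 are
 * assembled below.
 */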
static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
u64 pn = 0;
u8 *ehdr;
u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
pn = ehdr[0];
pn |= (u64)ehdr[1] << 8;
pn |= (u64)ehdr[4] << 16;
pn |= (u64)ehdr[5] << 24;
pn |= (u64)ehdr[6] << 32;
pn |= (u64)ehdr[7] << 40;
return pn;
}
static bool
ath12k_dp_rx_h_defrag_validate_incr_pn(struct ath12k *ar, struct ath12k_dp_rx_tid *rx_tid)
{
struct ath12k_base *ab = ar->ab;
enum hal_encrypt_type encrypt_type;
struct sk_buff *first_frag, *skb;
struct hal_rx_desc *desc;
u64 last_pn;
u64 cur_pn;
first_frag = skb_peek(&rx_tid->rx_frags);
desc = (struct hal_rx_desc *)first_frag->data;
encrypt_type = ath12k_dp_rx_h_enctype(ab, desc);
if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
return true;
last_pn = ath12k_dp_rx_h_get_pn(ar, first_frag);
skb_queue_walk(&rx_tid->rx_frags, skb) {
if (skb == first_frag)
continue;
cur_pn = ath12k_dp_rx_h_get_pn(ar, skb);
if (cur_pn != last_pn + 1)
return false;
last_pn = cur_pn;
}
return true;
}
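/* Defragmentation entry point for fragments delivered via the REO exception
 * ring: queue the fragment in fragment-number order for its TID, and once
 * the bitmap shows a complete sequence, validate the PN progression, stitch
 * the fragments together and reinject the rebuilt MPDU into the REO ring.
 */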
static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
struct sk_buff *msdu,
struct hal_reo_dest_ring *ring_desc)
{
struct ath12k_base *ab = ar->ab;
struct hal_rx_desc *rx_desc;
struct ath12k_peer *peer;
struct ath12k_dp_rx_tid *rx_tid;
struct sk_buff *defrag_skb = NULL;
u32 peer_id;
u16 seqno, frag_no;
u8 tid;
int ret = 0;
bool more_frags;
rx_desc = (struct hal_rx_desc *)msdu->data;
peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc);
tid = ath12k_dp_rx_h_tid(ab, rx_desc);
seqno = ath12k_dp_rx_h_seq_no(ab, rx_desc);
frag_no = ath12k_dp_rx_h_frag_no(ab, msdu);
more_frags = ath12k_dp_rx_h_more_frags(ab, msdu);
if (!ath12k_dp_rx_h_seq_ctrl_valid(ab, rx_desc) ||
!ath12k_dp_rx_h_fc_valid(ab, rx_desc) ||
tid > IEEE80211_NUM_TIDS)
return -EINVAL;
/* An unfragmented packet was received in the REO exception ring;
 * this shouldn't happen, as such packets typically arrive via the
 * reo2sw srngs.
 */
if (WARN_ON_ONCE(!frag_no && !more_frags))
return -EINVAL;
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find_by_id(ab, peer_id);
if (!peer) {
ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
peer_id);
ret = -ENOENT;
goto out_unlock;
}
rx_tid = &peer->rx_tid[tid];
if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
skb_queue_empty(&rx_tid->rx_frags)) {
/* Flush stored fragments and start a new sequence */
ath12k_dp_rx_frags_cleanup(rx_tid, true);
rx_tid->cur_sn = seqno;
}
if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
/* Fragment already present */
ret = -EINVAL;
goto out_unlock;
}
if (frag_no > __fls(rx_tid->rx_frag_bitmap))
__skb_queue_tail(&rx_tid->rx_frags, msdu);
else
ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu);
rx_tid->rx_frag_bitmap |= BIT(frag_no);
if (!more_frags)
rx_tid->last_frag_no = frag_no;
if (frag_no == 0) {
rx_tid->dst_ring_desc = kmemdup(ring_desc,
sizeof(*rx_tid->dst_ring_desc),
GFP_ATOMIC);
if (!rx_tid->dst_ring_desc) {
ret = -ENOMEM;
goto out_unlock;
}
} else {
ath12k_dp_rx_link_desc_return(ab, ring_desc,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
if (!rx_tid->last_frag_no ||
rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
mod_timer(&rx_tid->frag_timer, jiffies +
ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS);
goto out_unlock;
}
spin_unlock_bh(&ab->base_lock);
del_timer_sync(&rx_tid->frag_timer);
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find_by_id(ab, peer_id);
if (!peer)
goto err_frags_cleanup;
if (!ath12k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
goto err_frags_cleanup;
if (ath12k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
goto err_frags_cleanup;
if (!defrag_skb)
goto err_frags_cleanup;
if (ath12k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
goto err_frags_cleanup;
ath12k_dp_rx_frags_cleanup(rx_tid, false);
goto out_unlock;
err_frags_cleanup:
dev_kfree_skb_any(defrag_skb);
ath12k_dp_rx_frags_cleanup(rx_tid, true);
out_unlock:
spin_unlock_bh(&ab->base_lock);
return ret;
}
static int
ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
bool drop, u32 cookie)
{
struct ath12k_base *ab = ar->ab;
struct sk_buff *msdu;
struct ath12k_skb_rxcb *rxcb;
struct hal_rx_desc *rx_desc;
u16 msdu_len;
u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
struct ath12k_rx_desc_info *desc_info;
u64 desc_va;
desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
le32_to_cpu(desc->buf_va_lo));
desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
/* retry manual desc retrieval */
if (!desc_info) {
desc_info = ath12k_dp_get_rx_desc(ab, cookie);
if (!desc_info) {
ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
return -EINVAL;
}
}
if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
ath12k_warn(ab, " RX Exception, Check HW CC implementation");
msdu = desc_info->skb;
desc_info->skb = NULL;
spin_lock_bh(&ab->dp.rx_desc_lock);
list_move_tail(&desc_info->list, &ab->dp.rx_desc_free_list);
spin_unlock_bh(&ab->dp.rx_desc_lock);
rxcb = ATH12K_SKB_RXCB(msdu);
dma_unmap_single(ar->ab->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
if (drop) {
dev_kfree_skb_any(msdu);
return 0;
}
rcu_read_lock();
if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
dev_kfree_skb_any(msdu);
goto exit;
}
if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
dev_kfree_skb_any(msdu);
goto exit;
}
rx_desc = (struct hal_rx_desc *)msdu->data;
msdu_len = ath12k_dp_rx_h_msdu_len(ar->ab, rx_desc);
if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
ath12k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
sizeof(*rx_desc));
dev_kfree_skb_any(msdu);
goto exit;
}
skb_put(msdu, hal_rx_desc_sz + msdu_len);
if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) {
dev_kfree_skb_any(msdu);
ath12k_dp_rx_link_desc_return(ar->ab, desc,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
exit:
rcu_read_unlock();
return 0;
}
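/* Reap the REO exception ring within the NAPI budget. Entries with an
 * unexpected return buffer manager are released back to HW; entries that are
 * not single-MSDU fragments have their link descriptor returned to the WBM
 * idle list and their buffers dropped, while genuine rx fragments are handed
 * to the defrag path.
 */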
int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
int budget)
{
u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
struct dp_link_desc_bank *link_desc_banks;
enum hal_rx_buf_return_buf_manager rbm;
struct hal_rx_msdu_link *link_desc_va;
int tot_n_bufs_reaped, quota, ret, i;
struct hal_reo_dest_ring *reo_desc;
struct dp_rxdma_ring *rx_ring;
struct dp_srng *reo_except;
u32 desc_bank, num_msdus;
struct hal_srng *srng;
struct ath12k_dp *dp;
int mac_id;
struct ath12k *ar;
dma_addr_t paddr;
bool is_frag;
bool drop = false;
int pdev_id;
tot_n_bufs_reaped = 0;
quota = budget;
dp = &ab->dp;
reo_except = &dp->reo_except_ring;
link_desc_banks = dp->link_desc_banks;
srng = &ab->hal.srng_list[reo_except->ring_id];
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
while (budget &&
(reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
ab->soc_stats.err_ring_pkts++;
ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
&desc_bank);
if (ret) {
ath12k_warn(ab, "failed to parse error reo desc %d\n",
ret);
continue;
}
link_desc_va = link_desc_banks[desc_bank].vaddr +
(paddr - link_desc_banks[desc_bank].paddr);
ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
&rbm);
if (rbm != HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST &&
rbm != HAL_RX_BUF_RBM_SW3_BM &&
rbm != ab->hw_params->hal_params->rx_buf_rbm) {
ab->soc_stats.invalid_rbm++;
ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
ath12k_dp_rx_link_desc_return(ab, reo_desc,
HAL_WBM_REL_BM_ACT_REL_MSDU);
continue;
}
is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) &
RX_MPDU_DESC_INFO0_FRAG_FLAG);
/* Only process rx fragments below that carry a single MSDU per link
 * descriptor; drop MSDUs indicated due to errors.
 */
if (!is_frag || num_msdus > 1) {
drop = true;
/* Return the link desc back to wbm idle list */
ath12k_dp_rx_link_desc_return(ab, reo_desc,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
for (i = 0; i < num_msdus; i++) {
mac_id = le32_get_bits(reo_desc->info0,
HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
ar = ab->pdevs[pdev_id].ar;
if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, drop,
msdu_cookies[i]))
tot_n_bufs_reaped++;
}
if (tot_n_bufs_reaped >= quota) {
tot_n_bufs_reaped = quota;
goto exit;
}
budget = quota - tot_n_bufs_reaped;
}
exit:
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
rx_ring = &dp->rx_refill_buf_ring;
ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, tot_n_bufs_reaped,
ab->hw_params->hal_params->rx_buf_rbm, true);
return tot_n_bufs_reaped;
}
static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
int msdu_len,
struct sk_buff_head *msdu_list)
{
struct sk_buff *skb, *tmp;
struct ath12k_skb_rxcb *rxcb;
int n_buffs;
n_buffs = DIV_ROUND_UP(msdu_len,
(DP_RX_BUFFER_SIZE - ar->ab->hw_params->hal_desc_sz));
skb_queue_walk_safe(msdu_list, skb, tmp) {
rxcb = ATH12K_SKB_RXCB(skb);
if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
if (!n_buffs)
break;
__skb_unlink(skb, msdu_list);
dev_kfree_skb_any(skb);
n_buffs--;
}
}
}
static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
struct ieee80211_rx_status *status,
struct sk_buff_head *msdu_list)
{
struct ath12k_base *ab = ar->ab;
u16 msdu_len, peer_id;
struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
u8 l3pad_bytes;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
peer_id = ath12k_dp_rx_h_peer_id(ab, desc);
spin_lock(&ab->base_lock);
if (!ath12k_peer_find_by_id(ab, peer_id)) {
spin_unlock(&ab->base_lock);
ath12k_dbg(ab, ATH12K_DBG_DATA, "invalid peer id received in wbm err pkt %d\n",
peer_id);
return -EINVAL;
}
spin_unlock(&ab->base_lock);
if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
/* First buffer will be freed by the caller, so deduct its length */
msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
ath12k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
return -EINVAL;
}
/* Even after the SG buffers in the msdu list are cleaned up by the
 * check above, any MSDU received with the continuation flag set must
 * be dropped as invalid. This protects against stray error frames
 * carrying the continuation flag.
 */
if (rxcb->is_continuation)
return -EINVAL;
if (!ath12k_dp_rx_h_msdu_done(ab, desc)) {
ath12k_warn(ar->ab,
"msdu_done bit not set in null_q_desc processing\n");
__skb_queue_purge(msdu_list);
return -EIO;
}
/* Handle NULL queue descriptor violations arising out of a missing
 * REO queue for a given peer or a given TID. This typically may
 * happen if a packet is received on a QoS-enabled TID before the
 * ADDBA negotiation for that TID has set up the TID queue. It may
 * also happen for MC/BC frames if they are not routed to the non-QoS
 * TID queue, in the absence of any other default TID queue. This
 * error can show up in both a REO destination ring and a WBM release
 * ring.
 */
if (rxcb->is_frag) {
skb_pull(msdu, hal_rx_desc_sz);
} else {
l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
return -EINVAL;
skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
}
ath12k_dp_rx_h_ppdu(ar, desc, status);
ath12k_dp_rx_h_mpdu(ar, msdu, desc, status);
rxcb->tid = ath12k_dp_rx_h_tid(ab, desc);
/* Note that the caller still has access to the msdu and completes rx
 * with mac80211, so there is no need to clean up the amsdu_list here.
 */
return 0;
}
static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
struct ieee80211_rx_status *status,
struct sk_buff_head *msdu_list)
{
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
bool drop = false;
ar->ab->soc_stats.reo_error[rxcb->err_code]++;
switch (rxcb->err_code) {
case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
if (ath12k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
drop = true;
break;
case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
/* TODO: Do not drop PN-failed packets in the driver; instead, drop
 * them in mac80211 after incrementing the replay counters.
 */
fallthrough;
default:
/* TODO: Review other errors and report them to mac80211 as
 * appropriate.
 */
drop = true;
break;
}
return drop;
}
static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
struct ieee80211_rx_status *status)
{
struct ath12k_base *ab = ar->ab;
u16 msdu_len;
struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
u8 l3pad_bytes;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc);
rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc);
l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
ath12k_dp_rx_h_ppdu(ar, desc, status);
status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
RX_FLAG_DECRYPTED);
ath12k_dp_rx_h_undecap(ar, msdu, desc,
HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
}
static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
struct ieee80211_rx_status *status)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
bool drop = false;
u32 err_bitmap;
ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
switch (rxcb->err_code) {
case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
break;
}
fallthrough;
default:
/* TODO: Review other rxdma error codes to check if anything is
 * worth reporting to mac80211.
 */
drop = true;
break;
}
return drop;
}
static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
struct napi_struct *napi,
struct sk_buff *msdu,
struct sk_buff_head *msdu_list)
{
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
struct ieee80211_rx_status rxs = {0};
bool drop = true;
switch (rxcb->err_rel_src) {
case HAL_WBM_REL_SRC_MODULE_REO:
drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
break;
case HAL_WBM_REL_SRC_MODULE_RXDMA:
drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
break;
default:
/* msdu will get freed */
break;
}
if (drop) {
dev_kfree_skb_any(msdu);
return;
}
ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
}
int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct napi_struct *napi, int budget)
{
struct ath12k *ar;
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring;
struct hal_rx_wbm_rel_info err_info;
struct hal_srng *srng;
struct sk_buff *msdu;
struct sk_buff_head msdu_list[MAX_RADIOS];
struct ath12k_skb_rxcb *rxcb;
void *rx_desc;
int mac_id;
int num_buffs_reaped = 0;
struct ath12k_rx_desc_info *desc_info;
int ret, i;
for (i = 0; i < ab->num_radios; i++)
__skb_queue_head_init(&msdu_list[i]);
srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
rx_ring = &dp->rx_refill_buf_ring;
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
while (budget) {
rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
if (!rx_desc)
break;
ret = ath12k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
if (ret) {
ath12k_warn(ab,
"failed to parse rx error in wbm_rel ring desc %d\n",
ret);
continue;
}
desc_info = (struct ath12k_rx_desc_info *)err_info.rx_desc;
/* retry manual desc retrieval if hw cc is not done */
if (!desc_info) {
desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
if (!desc_info) {
ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
continue;
}
}
/* FIXME: Extract the mac id correctly. Since descriptors are not
 * tied to a mac, it can be extracted from the vdev id in the ring
 * descriptor.
 */
mac_id = 0;
if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
ath12k_warn(ab, "WBM RX err, Check HW CC implementation");
msdu = desc_info->skb;
desc_info->skb = NULL;
spin_lock_bh(&dp->rx_desc_lock);
list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
spin_unlock_bh(&dp->rx_desc_lock);
rxcb = ATH12K_SKB_RXCB(msdu);
dma_unmap_single(ab->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
num_buffs_reaped++;
if (!err_info.continuation)
budget--;
if (err_info.push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
dev_kfree_skb_any(msdu);
continue;
}
rxcb->err_rel_src = err_info.err_rel_src;
rxcb->err_code = err_info.err_code;
rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
__skb_queue_tail(&msdu_list[mac_id], msdu);
rxcb->is_first_msdu = err_info.first_msdu;
rxcb->is_last_msdu = err_info.last_msdu;
rxcb->is_continuation = err_info.continuation;
}
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
if (!num_buffs_reaped)
goto done;
ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped,
ab->hw_params->hal_params->rx_buf_rbm, true);
rcu_read_lock();
for (i = 0; i < ab->num_radios; i++) {
if (!rcu_dereference(ab->pdevs_active[i])) {
__skb_queue_purge(&msdu_list[i]);
continue;
}
ar = ab->pdevs[i].ar;
if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
__skb_queue_purge(&msdu_list[i]);
continue;
}
while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
}
rcu_read_unlock();
done:
return num_buffs_reaped;
}
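/* Reap the REO status ring and dispatch each status TLV to the handler of
 * the matching queued REO command (looked up by command number), freeing the
 * command entry afterwards.
 */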
void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
struct hal_tlv_64_hdr *hdr;
struct hal_srng *srng;
struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
bool found = false;
u16 tag;
struct hal_reo_status reo_status;
srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
memset(&reo_status, 0, sizeof(reo_status));
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
tag = u64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
switch (tag) {
case HAL_REO_GET_QUEUE_STATS_STATUS:
ath12k_hal_reo_status_queue_stats(ab, hdr,
&reo_status);
break;
case HAL_REO_FLUSH_QUEUE_STATUS:
ath12k_hal_reo_flush_queue_status(ab, hdr,
&reo_status);
break;
case HAL_REO_FLUSH_CACHE_STATUS:
ath12k_hal_reo_flush_cache_status(ab, hdr,
&reo_status);
break;
case HAL_REO_UNBLOCK_CACHE_STATUS:
ath12k_hal_reo_unblk_cache_status(ab, hdr,
&reo_status);
break;
case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
ath12k_hal_reo_flush_timeout_list_status(ab, hdr,
&reo_status);
break;
case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
ath12k_hal_reo_desc_thresh_reached_status(ab, hdr,
&reo_status);
break;
case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
ath12k_hal_reo_update_rx_reo_queue_status(ab, hdr,
&reo_status);
break;
default:
ath12k_warn(ab, "Unknown reo status type %d\n", tag);
continue;
}
spin_lock_bh(&dp->reo_cmd_lock);
list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
found = true;
list_del(&cmd->list);
break;
}
}
spin_unlock_bh(&dp->reo_cmd_lock);
if (found) {
cmd->handler(dp, (void *)&cmd->data,
reo_status.uniform_hdr.cmd_status);
kfree(cmd);
}
found = false;
}
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
}
void ath12k_dp_rx_free(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
int i;
ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
if (ab->hw_params->rx_mac_buf_ring)
ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
}
for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
ath12k_dp_srng_cleanup(ab, &dp->tx_mon_buf_ring.refill_buf_ring);
ath12k_dp_rxdma_buf_free(ab);
}
void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id)
{
struct ath12k *ar = ab->pdevs[mac_id].ar;
ath12k_dp_rx_pdev_srng_free(ar);
}
int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 ring_id;
int ret;
u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
tlv_filter.offset_valid = true;
tlv_filter.rx_packet_offset = hal_rx_desc_sz;
tlv_filter.rx_mpdu_start_offset =
ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
tlv_filter.rx_msdu_end_offset =
ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
/* TODO: Selectively subscribe to the required qwords within msdu_end
 * and mpdu_start, set up the mask in the message below, and modify the
 * rx_desc struct accordingly.
 */
ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
HAL_RXDMA_BUF,
DP_RXDMA_REFILL_RING_SIZE,
&tlv_filter);
return ret;
}
int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 ring_id;
int ret;
u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
int i;
ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
tlv_filter.offset_valid = true;
tlv_filter.rx_packet_offset = hal_rx_desc_sz;
tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);
tlv_filter.rx_mpdu_start_offset =
ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
tlv_filter.rx_msdu_end_offset =
ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
/* TODO: Selectively subscribe to the required qwords within msdu_end
 * and mpdu_start, set up the mask in the message below, and modify the
 * rx_desc struct accordingly.
 */
for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
ring_id = dp->rx_mac_buf_ring[i].ring_id;
ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
HAL_RXDMA_BUF,
DP_RXDMA_REFILL_RING_SIZE,
&tlv_filter);
}
return ret;
}
int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
u32 ring_id;
int i, ret;
/* TODO: Need to verify the HTT setup for QCN9224 */
ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 0, HAL_RXDMA_BUF);
if (ret) {
ath12k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
ret);
return ret;
}
if (ab->hw_params->rx_mac_buf_ring) {
for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
ring_id = dp->rx_mac_buf_ring[i].ring_id;
ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
i, HAL_RXDMA_BUF);
if (ret) {
ath12k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
i, ret);
return ret;
}
}
}
for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
ring_id = dp->rxdma_err_dst_ring[i].ring_id;
ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
i, HAL_RXDMA_DST);
if (ret) {
ath12k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
i, ret);
return ret;
}
}
if (ab->hw_params->rxdma1_enable) {
ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
0, HAL_RXDMA_MONITOR_BUF);
if (ret) {
ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
ret);
return ret;
}
ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
0, HAL_TX_MONITOR_BUF);
if (ret) {
ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
ret);
return ret;
}
}
ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
if (ret) {
ath12k_warn(ab, "failed to setup rxdma ring selection config\n");
return ret;
}
return 0;
}
int ath12k_dp_rx_alloc(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
int i, ret;
idr_init(&dp->rx_refill_buf_ring.bufs_idr);
spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
idr_init(&dp->tx_mon_buf_ring.bufs_idr);
spin_lock_init(&dp->tx_mon_buf_ring.idr_lock);
ret = ath12k_dp_srng_setup(ab,
&dp->rx_refill_buf_ring.refill_buf_ring,
HAL_RXDMA_BUF, 0, 0,
DP_RXDMA_BUF_RING_SIZE);
if (ret) {
ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n");
return ret;
}
if (ab->hw_params->rx_mac_buf_ring) {
for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
ret = ath12k_dp_srng_setup(ab,
&dp->rx_mac_buf_ring[i],
HAL_RXDMA_BUF, 1,
i, 1024);
if (ret) {
ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n",
i);
return ret;
}
}
}
for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
ret = ath12k_dp_srng_setup(ab, &dp->rxdma_err_dst_ring[i],
HAL_RXDMA_DST, 0, i,
DP_RXDMA_ERR_DST_RING_SIZE);
if (ret) {
ath12k_warn(ab, "failed to setup rxdma_err_dst_ring %d\n", i);
return ret;
}
}
if (ab->hw_params->rxdma1_enable) {
ret = ath12k_dp_srng_setup(ab,
&dp->rxdma_mon_buf_ring.refill_buf_ring,
HAL_RXDMA_MONITOR_BUF, 0, 0,
DP_RXDMA_MONITOR_BUF_RING_SIZE);
if (ret) {
ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
return ret;
}
ret = ath12k_dp_srng_setup(ab,
&dp->tx_mon_buf_ring.refill_buf_ring,
HAL_TX_MONITOR_BUF, 0, 0,
DP_TX_MONITOR_BUF_RING_SIZE);
if (ret) {
ath12k_warn(ab, "failed to setup DP_TX_MONITOR_BUF_RING_SIZE\n");
return ret;
}
}
ret = ath12k_dp_rxdma_buf_setup(ab);
if (ret) {
ath12k_warn(ab, "failed to setup rxdma ring\n");
return ret;
}
return 0;
}
int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
{
struct ath12k *ar = ab->pdevs[mac_id].ar;
struct ath12k_pdev_dp *dp = &ar->dp;
u32 ring_id;
int i;
int ret;
if (!ab->hw_params->rxdma1_enable)
goto out;
ret = ath12k_dp_rx_pdev_srng_alloc(ar);
if (ret) {
ath12k_warn(ab, "failed to setup rx srngs\n");
return ret;
}
for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
ring_id = dp->rxdma_mon_dst_ring[i].ring_id;
ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
mac_id + i,
HAL_RXDMA_MONITOR_DST);
if (ret) {
ath12k_warn(ab,
"failed to configure rxdma_mon_dst_ring %d %d\n",
i, ret);
return ret;
}
ring_id = dp->tx_mon_dst_ring[i].ring_id;
ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
mac_id + i,
HAL_TX_MONITOR_DST);
if (ret) {
ath12k_warn(ab,
"failed to configure tx_mon_dst_ring %d %d\n",
i, ret);
return ret;
}
}
out:
return 0;
}
static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar)
{
struct ath12k_pdev_dp *dp = &ar->dp;
struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data;
skb_queue_head_init(&pmon->rx_status_q);
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
memset(&pmon->rx_mon_stats, 0,
sizeof(pmon->rx_mon_stats));
return 0;
}
int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
{
struct ath12k_pdev_dp *dp = &ar->dp;
struct ath12k_mon_data *pmon = &dp->mon_data;
int ret = 0;
ret = ath12k_dp_rx_pdev_mon_status_attach(ar);
if (ret) {
ath12k_warn(ar->ab, "pdev_mon_status_attach() failed");
return ret;
}
/* if rxdma1_enable is false, no need to setup
* rxdma_mon_desc_ring.
*/
if (!ar->ab->hw_params->rxdma1_enable)
return 0;
pmon->mon_last_linkdesc_paddr = 0;
pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
spin_lock_init(&pmon->mon_lock);
return 0;
}
int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab)
{
/* start reap timer */
mod_timer(&ab->mon_reap_timer,
jiffies + msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
return 0;
}
int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer)
{
int ret;
if (stop_timer)
del_timer_sync(&ab->mon_reap_timer);
/* reap all the monitor related rings */
ret = ath12k_dp_purge_mon_ring(ab);
if (ret) {
ath12k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
return ret;
}
return 0;
}
|
linux-master
|
drivers/net/wireless/ath/ath12k/dp_rx.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "peer.h"
#include "debug.h"
struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id,
const u8 *addr)
{
struct ath12k_peer *peer;
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) {
if (peer->vdev_id != vdev_id)
continue;
if (!ether_addr_equal(peer->addr, addr))
continue;
return peer;
}
return NULL;
}
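/* Illustrative userspace sketch (not driver code) of the intrusive-list
 * lookup pattern used by ath12k_peer_find() above: walk every node and
 * filter on two keys, exactly as the driver filters on vdev_id and the
 * MAC address. A plain singly linked list stands in for the kernel's
 * list_head machinery. Build with e.g. "gcc -o peer_find peer_find.c".
 */
#include <stdio.h>
#include <string.h>

struct demo_peer {
	int vdev_id;
	unsigned char addr[6];
	struct demo_peer *next;
};

static struct demo_peer *demo_peer_find(struct demo_peer *head, int vdev_id,
					const unsigned char *addr)
{
	struct demo_peer *p;

	for (p = head; p; p = p->next) {
		if (p->vdev_id != vdev_id)
			continue;
		if (memcmp(p->addr, addr, 6) != 0)
			continue;
		return p;
	}
	return NULL;
}

int main(void)
{
	struct demo_peer b = { 1, { 0, 1, 2, 3, 4, 5 }, NULL };
	struct demo_peer a = { 0, { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff }, &b };
	const unsigned char want[6] = { 0, 1, 2, 3, 4, 5 };
	struct demo_peer *hit = demo_peer_find(&a, 1, want);

	printf("found peer on vdev %d\n", hit ? hit->vdev_id : -1);
	return 0;
}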
static struct ath12k_peer *ath12k_peer_find_by_pdev_idx(struct ath12k_base *ab,
u8 pdev_idx, const u8 *addr)
{
struct ath12k_peer *peer;
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) {
if (peer->pdev_idx != pdev_idx)
continue;
if (!ether_addr_equal(peer->addr, addr))
continue;
return peer;
}
return NULL;
}
struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
const u8 *addr)
{
struct ath12k_peer *peer;
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) {
if (!ether_addr_equal(peer->addr, addr))
continue;
return peer;
}
return NULL;
}
struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
int peer_id)
{
struct ath12k_peer *peer;
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list)
if (peer_id == peer->peer_id)
return peer;
return NULL;
}
bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id)
{
struct ath12k_peer *peer;
spin_lock_bh(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) {
if (vdev_id == peer->vdev_id) {
spin_unlock_bh(&ab->base_lock);
return true;
}
}
spin_unlock_bh(&ab->base_lock);
return false;
}
struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab,
int ast_hash)
{
struct ath12k_peer *peer;
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list)
if (ast_hash == peer->ast_hash)
return peer;
return NULL;
}
void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id)
{
struct ath12k_peer *peer;
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find_by_id(ab, peer_id);
if (!peer) {
ath12k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
peer_id);
goto exit;
}
ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
peer->vdev_id, peer->addr, peer_id);
list_del(&peer->list);
kfree(peer);
wake_up(&ab->peer_mapping_wq);
exit:
spin_unlock_bh(&ab->base_lock);
}
void ath12k_peer_map_event(struct ath12k_base *ab, u8 vdev_id, u16 peer_id,
u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
{
struct ath12k_peer *peer;
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find(ab, vdev_id, mac_addr);
if (!peer) {
peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
if (!peer)
goto exit;
peer->vdev_id = vdev_id;
peer->peer_id = peer_id;
peer->ast_hash = ast_hash;
peer->hw_peer_id = hw_peer_id;
ether_addr_copy(peer->addr, mac_addr);
list_add(&peer->list, &ab->peers);
wake_up(&ab->peer_mapping_wq);
}
ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
vdev_id, mac_addr, peer_id);
exit:
spin_unlock_bh(&ab->base_lock);
}
static int ath12k_wait_for_peer_common(struct ath12k_base *ab, int vdev_id,
const u8 *addr, bool expect_mapped)
{
int ret;
ret = wait_event_timeout(ab->peer_mapping_wq, ({
bool mapped;
spin_lock_bh(&ab->base_lock);
mapped = !!ath12k_peer_find(ab, vdev_id, addr);
spin_unlock_bh(&ab->base_lock);
(mapped == expect_mapped ||
test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags));
}), 3 * HZ);
if (ret <= 0)
return -ETIMEDOUT;
return 0;
}
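/* Userspace analog (an assumption: a simple poll loop, with no wait
 * queues) of the "wait until a predicate holds or a deadline passes"
 * pattern in ath12k_wait_for_peer_common() above, where the kernel uses
 * wait_event_timeout() and returns -ETIMEDOUT on expiry.
 */
#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <stdbool.h>
#include <time.h>

static bool predicate(int *state)
{
	return ++(*state) >= 5; /* becomes true on the fifth poll */
}

static int wait_for_predicate(int *state, int timeout_ms)
{
	struct timespec ts = { 0, 10 * 1000 * 1000 }; /* 10 ms poll period */
	int waited = 0;

	while (!predicate(state)) {
		if (waited >= timeout_ms)
			return -1; /* mirrors the -ETIMEDOUT return above */
		nanosleep(&ts, NULL);
		waited += 10;
	}
	return 0;
}

int main(void)
{
	int state = 0;

	printf("wait returned %d\n", wait_for_predicate(&state, 1000));
	return 0;
}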
void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id)
{
struct ath12k_peer *peer, *tmp;
struct ath12k_base *ab = ar->ab;
lockdep_assert_held(&ar->conf_mutex);
spin_lock_bh(&ab->base_lock);
list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
if (peer->vdev_id != vdev_id)
continue;
ath12k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
peer->addr, vdev_id);
list_del(&peer->list);
kfree(peer);
ar->num_peers--;
}
spin_unlock_bh(&ab->base_lock);
}
static int ath12k_wait_for_peer_deleted(struct ath12k *ar, int vdev_id, const u8 *addr)
{
return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
}
int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
const u8 *addr)
{
int ret;
unsigned long time_left;
ret = ath12k_wait_for_peer_deleted(ar, vdev_id, addr);
if (ret) {
ath12k_warn(ar->ab, "failed wait for peer deleted");
return ret;
}
time_left = wait_for_completion_timeout(&ar->peer_delete_done,
3 * HZ);
if (time_left == 0) {
ath12k_warn(ar->ab, "Timeout in receiving peer delete response\n");
return -ETIMEDOUT;
}
return 0;
}
int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->peer_delete_done);
ret = ath12k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
if (ret) {
ath12k_warn(ar->ab,
"failed to delete peer vdev_id %d addr %pM ret %d\n",
vdev_id, addr, ret);
return ret;
}
ret = ath12k_wait_for_peer_delete_done(ar, vdev_id, addr);
if (ret)
return ret;
ar->num_peers--;
return 0;
}
static int ath12k_wait_for_peer_created(struct ath12k *ar, int vdev_id, const u8 *addr)
{
return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
}
int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif,
struct ieee80211_sta *sta,
struct ath12k_wmi_peer_create_arg *arg)
{
struct ath12k_peer *peer;
int ret;
lockdep_assert_held(&ar->conf_mutex);
if (ar->num_peers > (ar->max_num_peers - 1)) {
ath12k_warn(ar->ab,
"failed to create peer due to insufficient peer entry resource in firmware\n");
return -ENOBUFS;
}
spin_lock_bh(&ar->ab->base_lock);
peer = ath12k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, arg->peer_addr);
if (peer) {
spin_unlock_bh(&ar->ab->base_lock);
return -EINVAL;
}
spin_unlock_bh(&ar->ab->base_lock);
ret = ath12k_wmi_send_peer_create_cmd(ar, arg);
if (ret) {
ath12k_warn(ar->ab,
"failed to send peer create vdev_id %d ret %d\n",
arg->vdev_id, ret);
return ret;
}
ret = ath12k_wait_for_peer_created(ar, arg->vdev_id,
arg->peer_addr);
if (ret)
return ret;
spin_lock_bh(&ar->ab->base_lock);
peer = ath12k_peer_find(ar->ab, arg->vdev_id, arg->peer_addr);
if (!peer) {
spin_unlock_bh(&ar->ab->base_lock);
ath12k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
arg->peer_addr, arg->vdev_id);
reinit_completion(&ar->peer_delete_done);
ret = ath12k_wmi_send_peer_delete_cmd(ar, arg->peer_addr,
arg->vdev_id);
if (ret) {
ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
arg->vdev_id, arg->peer_addr);
return ret;
}
ret = ath12k_wait_for_peer_delete_done(ar, arg->vdev_id,
arg->peer_addr);
if (ret)
return ret;
return -ENOENT;
}
peer->pdev_idx = ar->pdev_idx;
peer->sta = sta;
if (arvif->vif->type == NL80211_IFTYPE_STATION) {
arvif->ast_hash = peer->ast_hash;
arvif->ast_idx = peer->hw_peer_id;
}
peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
ar->num_peers++;
spin_unlock_bh(&ar->ab->base_lock);
return 0;
}
|
linux-master
|
drivers/net/wireless/ath/ath12k/peer.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hal_desc.h"
#include "hal.h"
#include "hal_tx.h"
#include "hif.h"
#define DSCP_TID_MAP_TBL_ENTRY_SIZE 64
/* dscp_tid_map - Default DSCP-TID mapping
*=================
* DSCP TID
*=================
* 000xxx 0
* 001xxx 1
* 010xxx 2
* 011xxx 3
* 100xxx 4
* 101xxx 5
* 110xxx 6
* 111xxx 7
*/
static inline u8 dscp2tid(u8 dscp)
{
return dscp >> 3;
}
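/* A minimal, self-contained userspace sketch (not part of the driver)
 * that prints the default DSCP-to-TID mapping implemented by dscp2tid()
 * above: the TID is simply the top three bits of the 6-bit DSCP value,
 * matching the comment table. Build with e.g. "gcc -o dscp dscp.c".
 */
#include <stdio.h>

static unsigned char demo_dscp2tid(unsigned char dscp)
{
	return dscp >> 3; /* same shift as dscp2tid() in this file */
}

int main(void)
{
	unsigned char dscp;

	/* 64 DSCP codepoints map onto 8 TIDs, 8 codepoints per TID */
	for (dscp = 0; dscp < 64; dscp++)
		printf("DSCP %2u -> TID %u\n", dscp, demo_dscp2tid(dscp));
	return 0;
}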
void ath12k_hal_tx_cmd_desc_setup(struct ath12k_base *ab,
struct hal_tcl_data_cmd *tcl_cmd,
struct hal_tx_info *ti)
{
tcl_cmd->buf_addr_info.info0 =
le32_encode_bits(ti->paddr, BUFFER_ADDR_INFO0_ADDR);
tcl_cmd->buf_addr_info.info1 =
		le32_encode_bits(((u64)ti->paddr >> HAL_ADDR_MSB_REG_SHIFT),
BUFFER_ADDR_INFO1_ADDR);
tcl_cmd->buf_addr_info.info1 |=
le32_encode_bits((ti->rbm_id), BUFFER_ADDR_INFO1_RET_BUF_MGR) |
le32_encode_bits(ti->desc_id, BUFFER_ADDR_INFO1_SW_COOKIE);
tcl_cmd->info0 =
le32_encode_bits(ti->type, HAL_TCL_DATA_CMD_INFO0_DESC_TYPE) |
le32_encode_bits(ti->bank_id, HAL_TCL_DATA_CMD_INFO0_BANK_ID);
tcl_cmd->info1 =
le32_encode_bits(ti->meta_data_flags,
HAL_TCL_DATA_CMD_INFO1_CMD_NUM);
tcl_cmd->info2 = cpu_to_le32(ti->flags0) |
le32_encode_bits(ti->data_len, HAL_TCL_DATA_CMD_INFO2_DATA_LEN) |
le32_encode_bits(ti->pkt_offset, HAL_TCL_DATA_CMD_INFO2_PKT_OFFSET);
tcl_cmd->info3 = cpu_to_le32(ti->flags1) |
le32_encode_bits(ti->tid, HAL_TCL_DATA_CMD_INFO3_TID) |
le32_encode_bits(ti->lmac_id, HAL_TCL_DATA_CMD_INFO3_PMAC_ID) |
le32_encode_bits(ti->vdev_id, HAL_TCL_DATA_CMD_INFO3_VDEV_ID);
tcl_cmd->info4 = le32_encode_bits(ti->bss_ast_idx,
HAL_TCL_DATA_CMD_INFO4_SEARCH_INDEX) |
le32_encode_bits(ti->bss_ast_hash,
HAL_TCL_DATA_CMD_INFO4_CACHE_SET_NUM);
tcl_cmd->info5 = 0;
}
void ath12k_hal_tx_set_dscp_tid_map(struct ath12k_base *ab, int id)
{
u32 ctrl_reg_val;
u32 addr;
u8 hw_map_val[HAL_DSCP_TID_TBL_SIZE], dscp, tid;
int i;
u32 value;
ctrl_reg_val = ath12k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
HAL_TCL1_RING_CMN_CTRL_REG);
/* Enable read/write access */
ctrl_reg_val |= HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);
addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_DSCP_TID_MAP +
(4 * id * (HAL_DSCP_TID_TBL_SIZE / 4));
	/* Each DSCP-TID mapping occupies three bits, so every iteration
	 * configures eight mappings, i.e. three bytes of the table.
	 */
for (i = 0, dscp = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 3) {
tid = dscp2tid(dscp);
value = u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP0);
dscp++;
tid = dscp2tid(dscp);
value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP1);
dscp++;
tid = dscp2tid(dscp);
value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP2);
dscp++;
tid = dscp2tid(dscp);
value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP3);
dscp++;
tid = dscp2tid(dscp);
value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP4);
dscp++;
tid = dscp2tid(dscp);
value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP5);
dscp++;
tid = dscp2tid(dscp);
value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP6);
dscp++;
tid = dscp2tid(dscp);
value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP7);
dscp++;
memcpy(&hw_map_val[i], &value, 3);
}
for (i = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 4) {
ath12k_hif_write32(ab, addr, *(u32 *)&hw_map_val[i]);
addr += 4;
}
/* Disable read/write access */
ctrl_reg_val = ath12k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
HAL_TCL1_RING_CMN_CTRL_REG);
ctrl_reg_val &= ~HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
HAL_TCL1_RING_CMN_CTRL_REG,
ctrl_reg_val);
}
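/* Self-contained userspace sketch (not driver code) of the packing done
 * by ath12k_hal_tx_set_dscp_tid_map() above: eight 3-bit TID values fill
 * exactly 24 bits, so every loop iteration emits three bytes of the
 * hardware table. Plain shifts stand in for the
 * HAL_TCL1_RING_FIELD_DSCP_TID_MAP* field encodings.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint8_t map[3];
	uint32_t value = 0;
	unsigned int i;

	for (i = 0; i < 8; i++)
		value |= (uint32_t)(i & 0x7) << (3 * i); /* TID i in slot i */

	/* Only 24 of the 32 bits are meaningful; the driver memcpy()s
	 * three bytes per iteration for the same reason (little-endian
	 * byte order assumed here, as on the target SoCs).
	 */
	memcpy(map, &value, 3);
	printf("packed bytes: %02x %02x %02x\n", map[0], map[1], map[2]);
	return 0;
}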
void ath12k_hal_tx_configure_bank_register(struct ath12k_base *ab, u32 bank_config,
u8 bank_id)
{
ath12k_hif_write32(ab, HAL_TCL_SW_CONFIG_BANK_ADDR + 4 * bank_id,
bank_config);
}
|
linux-master
|
drivers/net/wireless/ath/ath12k/hal_tx.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include "hal_tx.h"
#include "hal_rx.h"
#include "debug.h"
#include "hal_desc.h"
#include "hif.h"
static const struct hal_srng_config hw_srng_config_template[] = {
	/* TODO: max_rings can be populated by querying HW capabilities */
[HAL_REO_DST] = {
.start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
.max_rings = 8,
.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
.mac_type = ATH12K_HAL_SRNG_UMAC,
.ring_dir = HAL_SRNG_DIR_DST,
.max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
},
[HAL_REO_EXCEPTION] = {
		/* Designating REO2SW0 ring as exception ring.
		 * Any of the REO2SW rings can be used as the exception ring.
		 */
.start_ring_id = HAL_SRNG_RING_ID_REO2SW0,
.max_rings = 1,
.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
.mac_type = ATH12K_HAL_SRNG_UMAC,
.ring_dir = HAL_SRNG_DIR_DST,
.max_size = HAL_REO_REO2SW0_RING_BASE_MSB_RING_SIZE,
},
[HAL_REO_REINJECT] = {
.start_ring_id = HAL_SRNG_RING_ID_SW2REO,
.max_rings = 4,
.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
.mac_type = ATH12K_HAL_SRNG_UMAC,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
},
[HAL_REO_CMD] = {
.start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
.max_rings = 1,
.entry_size = (sizeof(struct hal_tlv_64_hdr) +
sizeof(struct hal_reo_get_queue_stats)) >> 2,
.mac_type = ATH12K_HAL_SRNG_UMAC,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
},
[HAL_REO_STATUS] = {
.start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
.max_rings = 1,
.entry_size = (sizeof(struct hal_tlv_64_hdr) +
sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
.mac_type = ATH12K_HAL_SRNG_UMAC,
.ring_dir = HAL_SRNG_DIR_DST,
.max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
},
[HAL_TCL_DATA] = {
.start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
.max_rings = 6,
.entry_size = sizeof(struct hal_tcl_data_cmd) >> 2,
.mac_type = ATH12K_HAL_SRNG_UMAC,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
},
[HAL_TCL_CMD] = {
.start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
.max_rings = 1,
.entry_size = sizeof(struct hal_tcl_gse_cmd) >> 2,
.mac_type = ATH12K_HAL_SRNG_UMAC,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
},
[HAL_TCL_STATUS] = {
.start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
.max_rings = 1,
.entry_size = (sizeof(struct hal_tlv_hdr) +
sizeof(struct hal_tcl_status_ring)) >> 2,
.mac_type = ATH12K_HAL_SRNG_UMAC,
.ring_dir = HAL_SRNG_DIR_DST,
.max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
},
[HAL_CE_SRC] = {
.start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
.max_rings = 16,
.entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
.mac_type = ATH12K_HAL_SRNG_UMAC,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
},
[HAL_CE_DST] = {
.start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
.max_rings = 16,
.entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
.mac_type = ATH12K_HAL_SRNG_UMAC,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
},
[HAL_CE_DST_STATUS] = {
.start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
.max_rings = 16,
.entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
.mac_type = ATH12K_HAL_SRNG_UMAC,
.ring_dir = HAL_SRNG_DIR_DST,
.max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
},
[HAL_WBM_IDLE_LINK] = {
.start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
.max_rings = 1,
.entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
.mac_type = ATH12K_HAL_SRNG_UMAC,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
},
[HAL_SW2WBM_RELEASE] = {
.start_ring_id = HAL_SRNG_RING_ID_WBM_SW0_RELEASE,
.max_rings = 2,
.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
.mac_type = ATH12K_HAL_SRNG_UMAC,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
},
[HAL_WBM2SW_RELEASE] = {
.start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
.max_rings = 8,
.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
.mac_type = ATH12K_HAL_SRNG_UMAC,
.ring_dir = HAL_SRNG_DIR_DST,
.max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
},
[HAL_RXDMA_BUF] = {
.start_ring_id = HAL_SRNG_SW2RXDMA_BUF0,
.max_rings = 1,
.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
.mac_type = ATH12K_HAL_SRNG_DMAC,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
},
[HAL_RXDMA_DST] = {
.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
.max_rings = 0,
.entry_size = 0,
.mac_type = ATH12K_HAL_SRNG_PMAC,
.ring_dir = HAL_SRNG_DIR_DST,
.max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
},
[HAL_RXDMA_MONITOR_BUF] = {
.start_ring_id = HAL_SRNG_SW2RXMON_BUF0,
.max_rings = 1,
.entry_size = sizeof(struct hal_mon_buf_ring) >> 2,
.mac_type = ATH12K_HAL_SRNG_PMAC,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
},
[HAL_RXDMA_MONITOR_STATUS] = { 0, },
[HAL_RXDMA_MONITOR_DESC] = { 0, },
[HAL_RXDMA_DIR_BUF] = {
.start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
.max_rings = 2,
.entry_size = 8 >> 2, /* TODO: Define the struct */
.mac_type = ATH12K_HAL_SRNG_PMAC,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
},
[HAL_PPE2TCL] = {
.start_ring_id = HAL_SRNG_RING_ID_PPE2TCL1,
.max_rings = 1,
.entry_size = sizeof(struct hal_tcl_entrance_from_ppe_ring) >> 2,
.mac_type = ATH12K_HAL_SRNG_PMAC,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
},
[HAL_PPE_RELEASE] = {
.start_ring_id = HAL_SRNG_RING_ID_WBM_PPE_RELEASE,
.max_rings = 1,
.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
.mac_type = ATH12K_HAL_SRNG_PMAC,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE,
},
[HAL_TX_MONITOR_BUF] = {
.start_ring_id = HAL_SRNG_SW2TXMON_BUF0,
.max_rings = 1,
.entry_size = sizeof(struct hal_mon_buf_ring) >> 2,
.mac_type = ATH12K_HAL_SRNG_PMAC,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
},
[HAL_RXDMA_MONITOR_DST] = {
.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXMON_BUF0,
.max_rings = 1,
.entry_size = sizeof(struct hal_mon_dest_desc) >> 2,
.mac_type = ATH12K_HAL_SRNG_PMAC,
.ring_dir = HAL_SRNG_DIR_DST,
.max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
},
[HAL_TX_MONITOR_DST] = {
.start_ring_id = HAL_SRNG_RING_ID_WMAC1_TXMON2SW0_BUF0,
.max_rings = 1,
.entry_size = sizeof(struct hal_mon_dest_desc) >> 2,
.mac_type = ATH12K_HAL_SRNG_PMAC,
.ring_dir = HAL_SRNG_DIR_DST,
.max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
}
};
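/* Quick userspace sketch of the ">> 2" convention used throughout the
 * table above: entry_size is expressed in 32-bit words, so each
 * descriptor's byte size is divided by four. The struct below is a
 * hypothetical descriptor, not a HAL type.
 */
#include <stdio.h>
#include <stdint.h>

struct demo_desc {
	uint32_t info0;
	uint32_t info1;
	uint64_t paddr;
};

int main(void)
{
	/* 16 bytes -> 4 dwords, matching the table's sizeof(...) >> 2 idiom */
	printf("entry_size = %zu dwords\n", sizeof(struct demo_desc) >> 2);
	return 0;
}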
static const struct ath12k_hal_tcl_to_wbm_rbm_map
ath12k_hal_qcn9274_tcl_to_wbm_rbm_map[DP_TCL_NUM_RING_MAX] = {
{
.wbm_ring_num = 0,
.rbm_id = HAL_RX_BUF_RBM_SW0_BM,
},
{
.wbm_ring_num = 1,
.rbm_id = HAL_RX_BUF_RBM_SW1_BM,
},
{
.wbm_ring_num = 2,
.rbm_id = HAL_RX_BUF_RBM_SW2_BM,
},
{
.wbm_ring_num = 4,
.rbm_id = HAL_RX_BUF_RBM_SW4_BM,
}
};
static const struct ath12k_hal_tcl_to_wbm_rbm_map
ath12k_hal_wcn7850_tcl_to_wbm_rbm_map[DP_TCL_NUM_RING_MAX] = {
{
.wbm_ring_num = 0,
.rbm_id = HAL_RX_BUF_RBM_SW0_BM,
},
{
.wbm_ring_num = 2,
.rbm_id = HAL_RX_BUF_RBM_SW2_BM,
},
{
.wbm_ring_num = 4,
.rbm_id = HAL_RX_BUF_RBM_SW4_BM,
},
};
static unsigned int ath12k_hal_reo1_ring_id_offset(struct ath12k_base *ab)
{
return HAL_REO1_RING_ID(ab) - HAL_REO1_RING_BASE_LSB(ab);
}
static unsigned int ath12k_hal_reo1_ring_msi1_base_lsb_offset(struct ath12k_base *ab)
{
return HAL_REO1_RING_MSI1_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
}
static unsigned int ath12k_hal_reo1_ring_msi1_base_msb_offset(struct ath12k_base *ab)
{
return HAL_REO1_RING_MSI1_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
}
static unsigned int ath12k_hal_reo1_ring_msi1_data_offset(struct ath12k_base *ab)
{
return HAL_REO1_RING_MSI1_DATA(ab) - HAL_REO1_RING_BASE_LSB(ab);
}
static unsigned int ath12k_hal_reo1_ring_base_msb_offset(struct ath12k_base *ab)
{
return HAL_REO1_RING_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
}
static unsigned int ath12k_hal_reo1_ring_producer_int_setup_offset(struct ath12k_base *ab)
{
return HAL_REO1_RING_PRODUCER_INT_SETUP(ab) - HAL_REO1_RING_BASE_LSB(ab);
}
static unsigned int ath12k_hal_reo1_ring_hp_addr_lsb_offset(struct ath12k_base *ab)
{
return HAL_REO1_RING_HP_ADDR_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
}
static unsigned int ath12k_hal_reo1_ring_hp_addr_msb_offset(struct ath12k_base *ab)
{
return HAL_REO1_RING_HP_ADDR_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
}
static unsigned int ath12k_hal_reo1_ring_misc_offset(struct ath12k_base *ab)
{
return HAL_REO1_RING_MISC(ab) - HAL_REO1_RING_BASE_LSB(ab);
}
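/* Sketch of the offset idiom used by the helpers above: each per-ring
 * register offset is derived by subtracting the ring's base register
 * address, so the same code can address REO1, REO2, ... by adding a
 * per-ring stride. The addresses and stride below are invented for
 * illustration only.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_RING1_BASE_LSB 0x1000u
#define DEMO_RING1_HP_ADDR  0x1030u
#define DEMO_RING_STRIDE    0x0100u /* assumed distance between rings */

int main(void)
{
	uint32_t hp_off = DEMO_RING1_HP_ADDR - DEMO_RING1_BASE_LSB;
	unsigned int ring;

	for (ring = 0; ring < 3; ring++)
		printf("ring %u HP register at %#x\n", ring,
		       DEMO_RING1_BASE_LSB + ring * DEMO_RING_STRIDE + hp_off);
	return 0;
}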
static bool ath12k_hw_qcn9274_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
{
return !!le16_get_bits(desc->u.qcn9274.msdu_end.info5,
RX_MSDU_END_INFO5_FIRST_MSDU);
}
static bool ath12k_hw_qcn9274_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
{
return !!le16_get_bits(desc->u.qcn9274.msdu_end.info5,
RX_MSDU_END_INFO5_LAST_MSDU);
}
static u8 ath12k_hw_qcn9274_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
{
return le16_get_bits(desc->u.qcn9274.msdu_end.info5,
RX_MSDU_END_INFO5_L3_HDR_PADDING);
}
static bool ath12k_hw_qcn9274_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.qcn9274.mpdu_start.info4,
RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID);
}
static u32 ath12k_hw_qcn9274_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274.mpdu_start.info2,
RX_MPDU_START_INFO2_ENC_TYPE);
}
static u8 ath12k_hw_qcn9274_rx_desc_get_decap_type(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274.msdu_end.info11,
RX_MSDU_END_INFO11_DECAP_FORMAT);
}
static u8 ath12k_hw_qcn9274_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274.msdu_end.info11,
RX_MSDU_END_INFO11_MESH_CTRL_PRESENT);
}
static bool ath12k_hw_qcn9274_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.qcn9274.mpdu_start.info4,
RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID);
}
static bool ath12k_hw_qcn9274_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.qcn9274.mpdu_start.info4,
RX_MPDU_START_INFO4_MPDU_FCTRL_VALID);
}
static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274.mpdu_start.info4,
RX_MPDU_START_INFO4_MPDU_SEQ_NUM);
}
static u16 ath12k_hw_qcn9274_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274.msdu_end.info10,
RX_MSDU_END_INFO10_MSDU_LENGTH);
}
static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
RX_MSDU_END_INFO12_SGI);
}
static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
RX_MSDU_END_INFO12_RATE_MCS);
}
static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
RX_MSDU_END_INFO12_RECV_BW);
}
static u32 ath12k_hw_qcn9274_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
{
return __le32_to_cpu(desc->u.qcn9274.msdu_end.phy_meta_data);
}
static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
RX_MSDU_END_INFO12_PKT_TYPE);
}
static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
RX_MSDU_END_INFO12_MIMO_SS_BITMAP);
}
static u8 ath12k_hw_qcn9274_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
{
return le16_get_bits(desc->u.qcn9274.msdu_end.info5,
RX_MSDU_END_INFO5_TID);
}
static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
{
return __le16_to_cpu(desc->u.qcn9274.mpdu_start.sw_peer_id);
}
static void ath12k_hw_qcn9274_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc,
struct hal_rx_desc *ldesc)
{
memcpy(&fdesc->u.qcn9274.msdu_end, &ldesc->u.qcn9274.msdu_end,
sizeof(struct rx_msdu_end_qcn9274));
}
static u32 ath12k_hw_qcn9274_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
{
return __le16_to_cpu(desc->u.qcn9274.mpdu_start.phy_ppdu_id);
}
static void ath12k_hw_qcn9274_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
{
u32 info = __le32_to_cpu(desc->u.qcn9274.msdu_end.info10);
info &= ~RX_MSDU_END_INFO10_MSDU_LENGTH;
info |= u32_encode_bits(len, RX_MSDU_END_INFO10_MSDU_LENGTH);
desc->u.qcn9274.msdu_end.info10 = __cpu_to_le32(info);
}
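/* Self-contained sketch of the read-modify-write bitfield update in
 * ath12k_hw_qcn9274_rx_desc_set_msdu_len() above. MSDU_LEN_MASK below is
 * an illustrative stand-in for RX_MSDU_END_INFO10_MSDU_LENGTH (the real
 * mask lives in the rx descriptor definitions, not here).
 */
#include <stdio.h>
#include <stdint.h>

#define MSDU_LEN_MASK 0x00003fffu /* assumed 14-bit length field at bit 0 */

static uint32_t demo_set_msdu_len(uint32_t info, uint16_t len)
{
	info &= ~MSDU_LEN_MASK;                /* clear the old length */
	info |= (uint32_t)len & MSDU_LEN_MASK; /* insert the new one */
	return info;
}

int main(void)
{
	uint32_t info = 0xdead0000u | 123; /* other fields + old length */

	printf("info10: %#010x -> %#010x\n", info,
	       demo_set_msdu_len(info, 1500));
	return 0;
}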
static u8 *ath12k_hw_qcn9274_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
{
return &desc->u.qcn9274.msdu_payload[0];
}
static u32 ath12k_hw_qcn9274_rx_desc_get_mpdu_start_offset(void)
{
return offsetof(struct hal_rx_desc_qcn9274, mpdu_start);
}
static u32 ath12k_hw_qcn9274_rx_desc_get_msdu_end_offset(void)
{
return offsetof(struct hal_rx_desc_qcn9274, msdu_end);
}
static bool ath12k_hw_qcn9274_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
{
return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info4) &
RX_MPDU_START_INFO4_MAC_ADDR2_VALID;
}
static u8 *ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
{
return desc->u.qcn9274.mpdu_start.addr2;
}
static bool ath12k_hw_qcn9274_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
{
return __le16_to_cpu(desc->u.qcn9274.msdu_end.info5) &
RX_MSDU_END_INFO5_DA_IS_MCBC;
}
static void ath12k_hw_qcn9274_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
struct ieee80211_hdr *hdr)
{
hdr->frame_control = desc->u.qcn9274.mpdu_start.frame_ctrl;
hdr->duration_id = desc->u.qcn9274.mpdu_start.duration;
ether_addr_copy(hdr->addr1, desc->u.qcn9274.mpdu_start.addr1);
ether_addr_copy(hdr->addr2, desc->u.qcn9274.mpdu_start.addr2);
ether_addr_copy(hdr->addr3, desc->u.qcn9274.mpdu_start.addr3);
if (__le32_to_cpu(desc->u.qcn9274.mpdu_start.info4) &
RX_MPDU_START_INFO4_MAC_ADDR4_VALID) {
ether_addr_copy(hdr->addr4, desc->u.qcn9274.mpdu_start.addr4);
}
hdr->seq_ctrl = desc->u.qcn9274.mpdu_start.seq_ctrl;
}
static void ath12k_hw_qcn9274_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
u8 *crypto_hdr,
enum hal_encrypt_type enctype)
{
unsigned int key_id;
switch (enctype) {
case HAL_ENCRYPT_TYPE_OPEN:
return;
case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
case HAL_ENCRYPT_TYPE_TKIP_MIC:
crypto_hdr[0] =
HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[0]);
crypto_hdr[1] = 0;
crypto_hdr[2] =
HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274.mpdu_start.pn[0]);
break;
case HAL_ENCRYPT_TYPE_CCMP_128:
case HAL_ENCRYPT_TYPE_CCMP_256:
case HAL_ENCRYPT_TYPE_GCMP_128:
case HAL_ENCRYPT_TYPE_AES_GCMP_256:
crypto_hdr[0] =
HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274.mpdu_start.pn[0]);
crypto_hdr[1] =
HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[0]);
crypto_hdr[2] = 0;
break;
case HAL_ENCRYPT_TYPE_WEP_40:
case HAL_ENCRYPT_TYPE_WEP_104:
case HAL_ENCRYPT_TYPE_WEP_128:
case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
case HAL_ENCRYPT_TYPE_WAPI:
return;
}
key_id = le32_get_bits(desc->u.qcn9274.mpdu_start.info5,
RX_MPDU_START_INFO5_KEY_ID);
crypto_hdr[3] = 0x20 | (key_id << 6);
crypto_hdr[4] = HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.qcn9274.mpdu_start.pn[0]);
crypto_hdr[5] = HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.qcn9274.mpdu_start.pn[0]);
crypto_hdr[6] = HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274.mpdu_start.pn[1]);
crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[1]);
}
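/* Userspace sketch of the CCMP/GCMP IV layout produced by
 * ath12k_hw_qcn9274_rx_desc_get_crypto_hdr() above for the CCMP case:
 * the 48-bit packet number is split across bytes 0, 1 and 4..7, byte 2
 * is reserved, and byte 3 carries the ExtIV flag (0x20) plus the 2-bit
 * key ID in its top bits. The example PN and key ID are made up.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pn = 0x0000112233445566ULL; /* example 48-bit PN */
	unsigned int key_id = 1;
	uint8_t hdr[8];

	hdr[0] = pn & 0xff;            /* PN0 */
	hdr[1] = (pn >> 8) & 0xff;     /* PN1 */
	hdr[2] = 0;                    /* reserved */
	hdr[3] = 0x20 | (key_id << 6); /* ExtIV | key ID */
	hdr[4] = (pn >> 16) & 0xff;    /* PN2 */
	hdr[5] = (pn >> 24) & 0xff;    /* PN3 */
	hdr[6] = (pn >> 32) & 0xff;    /* PN4 */
	hdr[7] = (pn >> 40) & 0xff;    /* PN5 */

	for (int i = 0; i < 8; i++)
		printf("%02x ", hdr[i]);
	printf("\n");
	return 0;
}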
static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
{
return __le16_to_cpu(desc->u.qcn9274.mpdu_start.frame_ctrl);
}
static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab)
{
struct ath12k_hal *hal = &ab->hal;
struct hal_srng_config *s;
hal->srng_config = kmemdup(hw_srng_config_template,
sizeof(hw_srng_config_template),
GFP_KERNEL);
if (!hal->srng_config)
return -ENOMEM;
s = &hal->srng_config[HAL_REO_DST];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP;
s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
s->reg_size[1] = HAL_REO2_RING_HP - HAL_REO1_RING_HP;
s = &hal->srng_config[HAL_REO_EXCEPTION];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_HP;
s = &hal->srng_config[HAL_REO_REINJECT];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP;
s->reg_size[0] = HAL_SW2REO1_RING_BASE_LSB(ab) - HAL_SW2REO_RING_BASE_LSB(ab);
s->reg_size[1] = HAL_SW2REO1_RING_HP - HAL_SW2REO_RING_HP;
s = &hal->srng_config[HAL_REO_CMD];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP;
s = &hal->srng_config[HAL_REO_STATUS];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP;
s = &hal->srng_config[HAL_TCL_DATA];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB;
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
s->reg_size[0] = HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB;
s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
s = &hal->srng_config[HAL_TCL_CMD];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;
s = &hal->srng_config[HAL_TCL_STATUS];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
s = &hal->srng_config[HAL_CE_SRC];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_BASE_LSB;
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP;
s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
s = &hal->srng_config[HAL_CE_DST];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_BASE_LSB;
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP;
s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
s = &hal->srng_config[HAL_CE_DST_STATUS];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
HAL_CE_DST_STATUS_RING_BASE_LSB;
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_STATUS_RING_HP;
s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
s = &hal->srng_config[HAL_WBM_IDLE_LINK];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;
s = &hal->srng_config[HAL_SW2WBM_RELEASE];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SW_RELEASE_RING_HP;
s->reg_size[0] = HAL_WBM_SW1_RELEASE_RING_BASE_LSB(ab) -
HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab);
s->reg_size[1] = HAL_WBM_SW1_RELEASE_RING_HP - HAL_WBM_SW_RELEASE_RING_HP;
s = &hal->srng_config[HAL_WBM2SW_RELEASE];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) -
HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;
	/* Some LMAC rings are not accessed from the host:
	 * RXDMA_BUF, RXDMA_DST, RXDMA_MONITOR_BUF, RXDMA_MONITOR_STATUS,
	 * RXDMA_MONITOR_DST, RXDMA_MONITOR_DESC, RXDMA_DIR_BUF_SRC,
	 * RXDMA_RX_MONITOR_BUF, TX_MONITOR_BUF, TX_MONITOR_DST, SW2RXDMA
	 */
s = &hal->srng_config[HAL_PPE2TCL];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_PPE2TCL1_RING_BASE_LSB;
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_PPE2TCL1_RING_HP;
s = &hal->srng_config[HAL_PPE_RELEASE];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_PPE_RELEASE_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_PPE_RELEASE_RING_HP;
return 0;
}
static bool ath12k_hw_qcn9274_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.qcn9274.msdu_end.info14,
RX_MSDU_END_INFO14_MSDU_DONE);
}
static bool ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.qcn9274.msdu_end.info13,
RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL);
}
static bool ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.qcn9274.msdu_end.info13,
RX_MSDU_END_INFO13_IP_CKSUM_FAIL);
}
static bool ath12k_hw_qcn9274_dp_rx_h_is_decrypted(struct hal_rx_desc *desc)
{
return (le32_get_bits(desc->u.qcn9274.msdu_end.info14,
RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) ==
RX_DESC_DECRYPT_STATUS_CODE_OK);
}
static u32 ath12k_hw_qcn9274_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
{
u32 info = __le32_to_cpu(desc->u.qcn9274.msdu_end.info13);
u32 errmap = 0;
if (info & RX_MSDU_END_INFO13_FCS_ERR)
errmap |= HAL_RX_MPDU_ERR_FCS;
if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
errmap |= HAL_RX_MPDU_ERR_DECRYPT;
if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
return errmap;
}
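/* Userspace sketch (with invented bit positions, not the real descriptor
 * layout) of the error-bitmap translation done by
 * ath12k_hw_qcn9274_dp_rx_h_mpdu_err() above: raw hardware status bits
 * are remapped onto stable driver-defined error flags so the rest of the
 * RX path never depends on a chip-specific descriptor format.
 */
#include <stdio.h>
#include <stdint.h>

#define HW_FCS_ERR     (1u << 0) /* assumed hardware bit */
#define HW_DECRYPT_ERR (1u << 1) /* assumed hardware bit */

#define SW_ERR_FCS     (1u << 0) /* driver-facing flag */
#define SW_ERR_DECRYPT (1u << 1) /* driver-facing flag */

static uint32_t translate_errs(uint32_t hw_info)
{
	uint32_t errmap = 0;

	if (hw_info & HW_FCS_ERR)
		errmap |= SW_ERR_FCS;
	if (hw_info & HW_DECRYPT_ERR)
		errmap |= SW_ERR_DECRYPT;
	return errmap;
}

int main(void)
{
	printf("errmap = %#x\n", translate_errs(HW_FCS_ERR));
	return 0;
}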
const struct hal_ops hal_qcn9274_ops = {
.rx_desc_get_first_msdu = ath12k_hw_qcn9274_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath12k_hw_qcn9274_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath12k_hw_qcn9274_rx_desc_get_l3_pad_bytes,
.rx_desc_encrypt_valid = ath12k_hw_qcn9274_rx_desc_encrypt_valid,
.rx_desc_get_encrypt_type = ath12k_hw_qcn9274_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath12k_hw_qcn9274_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath12k_hw_qcn9274_rx_desc_get_mesh_ctl,
.rx_desc_get_mpdu_seq_ctl_vld = ath12k_hw_qcn9274_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath12k_hw_qcn9274_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath12k_hw_qcn9274_rx_desc_get_mpdu_start_seq_no,
.rx_desc_get_msdu_len = ath12k_hw_qcn9274_rx_desc_get_msdu_len,
.rx_desc_get_msdu_sgi = ath12k_hw_qcn9274_rx_desc_get_msdu_sgi,
.rx_desc_get_msdu_rate_mcs = ath12k_hw_qcn9274_rx_desc_get_msdu_rate_mcs,
.rx_desc_get_msdu_rx_bw = ath12k_hw_qcn9274_rx_desc_get_msdu_rx_bw,
.rx_desc_get_msdu_freq = ath12k_hw_qcn9274_rx_desc_get_msdu_freq,
.rx_desc_get_msdu_pkt_type = ath12k_hw_qcn9274_rx_desc_get_msdu_pkt_type,
.rx_desc_get_msdu_nss = ath12k_hw_qcn9274_rx_desc_get_msdu_nss,
.rx_desc_get_mpdu_tid = ath12k_hw_qcn9274_rx_desc_get_mpdu_tid,
.rx_desc_get_mpdu_peer_id = ath12k_hw_qcn9274_rx_desc_get_mpdu_peer_id,
.rx_desc_copy_end_tlv = ath12k_hw_qcn9274_rx_desc_copy_end_tlv,
.rx_desc_get_mpdu_ppdu_id = ath12k_hw_qcn9274_rx_desc_get_mpdu_ppdu_id,
.rx_desc_set_msdu_len = ath12k_hw_qcn9274_rx_desc_set_msdu_len,
.rx_desc_get_msdu_payload = ath12k_hw_qcn9274_rx_desc_get_msdu_payload,
.rx_desc_get_mpdu_start_offset = ath12k_hw_qcn9274_rx_desc_get_mpdu_start_offset,
.rx_desc_get_msdu_end_offset = ath12k_hw_qcn9274_rx_desc_get_msdu_end_offset,
.rx_desc_mac_addr2_valid = ath12k_hw_qcn9274_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2,
.rx_desc_is_da_mcbc = ath12k_hw_qcn9274_rx_desc_is_da_mcbc,
.rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_rx_desc_get_dot11_hdr,
.rx_desc_get_crypto_header = ath12k_hw_qcn9274_rx_desc_get_crypto_hdr,
.rx_desc_get_mpdu_frame_ctl = ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl,
.create_srng_config = ath12k_hal_srng_create_config_qcn9274,
.tcl_to_wbm_rbm_map = ath12k_hal_qcn9274_tcl_to_wbm_rbm_map,
.dp_rx_h_msdu_done = ath12k_hw_qcn9274_dp_rx_h_msdu_done,
.dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail,
.dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail,
.dp_rx_h_is_decrypted = ath12k_hw_qcn9274_dp_rx_h_is_decrypted,
.dp_rx_h_mpdu_err = ath12k_hw_qcn9274_dp_rx_h_mpdu_err,
};
static bool ath12k_hw_wcn7850_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
{
return !!le16_get_bits(desc->u.wcn7850.msdu_end.info5,
RX_MSDU_END_INFO5_FIRST_MSDU);
}
static bool ath12k_hw_wcn7850_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
{
return !!le16_get_bits(desc->u.wcn7850.msdu_end.info5,
RX_MSDU_END_INFO5_LAST_MSDU);
}
static u8 ath12k_hw_wcn7850_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
{
return le16_get_bits(desc->u.wcn7850.msdu_end.info5,
RX_MSDU_END_INFO5_L3_HDR_PADDING);
}
static bool ath12k_hw_wcn7850_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4,
RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID);
}
static u32 ath12k_hw_wcn7850_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.wcn7850.mpdu_start.info2,
RX_MPDU_START_INFO2_ENC_TYPE);
}
static u8 ath12k_hw_wcn7850_rx_desc_get_decap_type(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.wcn7850.msdu_end.info11,
RX_MSDU_END_INFO11_DECAP_FORMAT);
}
static u8 ath12k_hw_wcn7850_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.wcn7850.msdu_end.info11,
RX_MSDU_END_INFO11_MESH_CTRL_PRESENT);
}
static bool ath12k_hw_wcn7850_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4,
RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID);
}
static bool ath12k_hw_wcn7850_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4,
RX_MPDU_START_INFO4_MPDU_FCTRL_VALID);
}
static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.wcn7850.mpdu_start.info4,
RX_MPDU_START_INFO4_MPDU_SEQ_NUM);
}
static u16 ath12k_hw_wcn7850_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.wcn7850.msdu_end.info10,
RX_MSDU_END_INFO10_MSDU_LENGTH);
}
static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
RX_MSDU_END_INFO12_SGI);
}
static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
RX_MSDU_END_INFO12_RATE_MCS);
}
static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
RX_MSDU_END_INFO12_RECV_BW);
}
static u32 ath12k_hw_wcn7850_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
{
return __le32_to_cpu(desc->u.wcn7850.msdu_end.phy_meta_data);
}
static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
RX_MSDU_END_INFO12_PKT_TYPE);
}
static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
RX_MSDU_END_INFO12_MIMO_SS_BITMAP);
}
static u8 ath12k_hw_wcn7850_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
{
return le16_get_bits(desc->u.wcn7850.msdu_end.info5,
RX_MSDU_END_INFO5_TID);
}
static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
{
return __le16_to_cpu(desc->u.wcn7850.mpdu_start.sw_peer_id);
}
static void ath12k_hw_wcn7850_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc,
struct hal_rx_desc *ldesc)
{
	memcpy(&fdesc->u.wcn7850.msdu_end, &ldesc->u.wcn7850.msdu_end,
	       sizeof(fdesc->u.wcn7850.msdu_end));
}
static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
{
return le64_get_bits(desc->u.wcn7850.mpdu_start_tag,
HAL_TLV_HDR_TAG);
}
static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
{
return __le16_to_cpu(desc->u.wcn7850.mpdu_start.phy_ppdu_id);
}
static void ath12k_hw_wcn7850_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
{
u32 info = __le32_to_cpu(desc->u.wcn7850.msdu_end.info10);
info &= ~RX_MSDU_END_INFO10_MSDU_LENGTH;
info |= u32_encode_bits(len, RX_MSDU_END_INFO10_MSDU_LENGTH);
desc->u.wcn7850.msdu_end.info10 = __cpu_to_le32(info);
}
static u8 *ath12k_hw_wcn7850_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
{
return &desc->u.wcn7850.msdu_payload[0];
}
static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_offset(void)
{
return offsetof(struct hal_rx_desc_wcn7850, mpdu_start_tag);
}
static u32 ath12k_hw_wcn7850_rx_desc_get_msdu_end_offset(void)
{
return offsetof(struct hal_rx_desc_wcn7850, msdu_end_tag);
}
static bool ath12k_hw_wcn7850_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
{
return __le32_to_cpu(desc->u.wcn7850.mpdu_start.info4) &
RX_MPDU_START_INFO4_MAC_ADDR2_VALID;
}
static u8 *ath12k_hw_wcn7850_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
{
return desc->u.wcn7850.mpdu_start.addr2;
}
static bool ath12k_hw_wcn7850_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
{
return __le16_to_cpu(desc->u.wcn7850.msdu_end.info5) &
RX_MSDU_END_INFO5_DA_IS_MCBC;
}
static void ath12k_hw_wcn7850_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
struct ieee80211_hdr *hdr)
{
hdr->frame_control = desc->u.wcn7850.mpdu_start.frame_ctrl;
hdr->duration_id = desc->u.wcn7850.mpdu_start.duration;
ether_addr_copy(hdr->addr1, desc->u.wcn7850.mpdu_start.addr1);
ether_addr_copy(hdr->addr2, desc->u.wcn7850.mpdu_start.addr2);
ether_addr_copy(hdr->addr3, desc->u.wcn7850.mpdu_start.addr3);
if (__le32_to_cpu(desc->u.wcn7850.mpdu_start.info4) &
RX_MPDU_START_INFO4_MAC_ADDR4_VALID) {
ether_addr_copy(hdr->addr4, desc->u.wcn7850.mpdu_start.addr4);
}
hdr->seq_ctrl = desc->u.wcn7850.mpdu_start.seq_ctrl;
}
static void ath12k_hw_wcn7850_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
u8 *crypto_hdr,
enum hal_encrypt_type enctype)
{
unsigned int key_id;
switch (enctype) {
case HAL_ENCRYPT_TYPE_OPEN:
return;
case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
case HAL_ENCRYPT_TYPE_TKIP_MIC:
crypto_hdr[0] =
HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[0]);
crypto_hdr[1] = 0;
crypto_hdr[2] =
HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[0]);
break;
case HAL_ENCRYPT_TYPE_CCMP_128:
case HAL_ENCRYPT_TYPE_CCMP_256:
case HAL_ENCRYPT_TYPE_GCMP_128:
case HAL_ENCRYPT_TYPE_AES_GCMP_256:
crypto_hdr[0] =
HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[0]);
crypto_hdr[1] =
HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[0]);
crypto_hdr[2] = 0;
break;
case HAL_ENCRYPT_TYPE_WEP_40:
case HAL_ENCRYPT_TYPE_WEP_104:
case HAL_ENCRYPT_TYPE_WEP_128:
case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
case HAL_ENCRYPT_TYPE_WAPI:
return;
}
key_id = u32_get_bits(__le32_to_cpu(desc->u.wcn7850.mpdu_start.info5),
RX_MPDU_START_INFO5_KEY_ID);
crypto_hdr[3] = 0x20 | (key_id << 6);
crypto_hdr[4] = HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.wcn7850.mpdu_start.pn[0]);
crypto_hdr[5] = HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.wcn7850.mpdu_start.pn[0]);
crypto_hdr[6] = HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[1]);
crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[1]);
}
static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
{
return __le16_to_cpu(desc->u.wcn7850.mpdu_start.frame_ctrl);
}
static int ath12k_hal_srng_create_config_wcn7850(struct ath12k_base *ab)
{
struct ath12k_hal *hal = &ab->hal;
struct hal_srng_config *s;
hal->srng_config = kmemdup(hw_srng_config_template,
sizeof(hw_srng_config_template),
GFP_KERNEL);
if (!hal->srng_config)
return -ENOMEM;
s = &hal->srng_config[HAL_REO_DST];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP;
s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
s->reg_size[1] = HAL_REO2_RING_HP - HAL_REO1_RING_HP;
s = &hal->srng_config[HAL_REO_EXCEPTION];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_HP;
s = &hal->srng_config[HAL_REO_REINJECT];
s->max_rings = 1;
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP;
s = &hal->srng_config[HAL_REO_CMD];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP;
s = &hal->srng_config[HAL_REO_STATUS];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP;
s = &hal->srng_config[HAL_TCL_DATA];
s->max_rings = 5;
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB;
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
s->reg_size[0] = HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB;
s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
s = &hal->srng_config[HAL_TCL_CMD];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;
s = &hal->srng_config[HAL_TCL_STATUS];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
s = &hal->srng_config[HAL_CE_SRC];
s->max_rings = 12;
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_BASE_LSB;
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP;
s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
s = &hal->srng_config[HAL_CE_DST];
s->max_rings = 12;
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_BASE_LSB;
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP;
s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
s = &hal->srng_config[HAL_CE_DST_STATUS];
s->max_rings = 12;
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
HAL_CE_DST_STATUS_RING_BASE_LSB;
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_STATUS_RING_HP;
s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
s = &hal->srng_config[HAL_WBM_IDLE_LINK];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;
s = &hal->srng_config[HAL_SW2WBM_RELEASE];
s->max_rings = 1;
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SW_RELEASE_RING_HP;
s = &hal->srng_config[HAL_WBM2SW_RELEASE];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) -
HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;
s = &hal->srng_config[HAL_RXDMA_BUF];
s->max_rings = 2;
s->mac_type = ATH12K_HAL_SRNG_PMAC;
s = &hal->srng_config[HAL_RXDMA_DST];
s->max_rings = 1;
s->entry_size = sizeof(struct hal_reo_entrance_ring) >> 2;
/* below rings are not used */
s = &hal->srng_config[HAL_RXDMA_DIR_BUF];
s->max_rings = 0;
s = &hal->srng_config[HAL_PPE2TCL];
s->max_rings = 0;
s = &hal->srng_config[HAL_PPE_RELEASE];
s->max_rings = 0;
s = &hal->srng_config[HAL_TX_MONITOR_BUF];
s->max_rings = 0;
s = &hal->srng_config[HAL_TX_MONITOR_DST];
s->max_rings = 0;
return 0;
}
static bool ath12k_hw_wcn7850_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.wcn7850.msdu_end.info14,
RX_MSDU_END_INFO14_MSDU_DONE);
}
static bool ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.wcn7850.msdu_end.info13,
RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL);
}
static bool ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.wcn7850.msdu_end.info13,
RX_MSDU_END_INFO13_IP_CKSUM_FAIL);
}
static bool ath12k_hw_wcn7850_dp_rx_h_is_decrypted(struct hal_rx_desc *desc)
{
return (le32_get_bits(desc->u.wcn7850.msdu_end.info14,
RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) ==
RX_DESC_DECRYPT_STATUS_CODE_OK);
}
static u32 ath12k_hw_wcn7850_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
{
u32 info = __le32_to_cpu(desc->u.wcn7850.msdu_end.info13);
u32 errmap = 0;
if (info & RX_MSDU_END_INFO13_FCS_ERR)
errmap |= HAL_RX_MPDU_ERR_FCS;
if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
errmap |= HAL_RX_MPDU_ERR_DECRYPT;
if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
return errmap;
}
const struct hal_ops hal_wcn7850_ops = {
.rx_desc_get_first_msdu = ath12k_hw_wcn7850_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath12k_hw_wcn7850_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath12k_hw_wcn7850_rx_desc_get_l3_pad_bytes,
.rx_desc_encrypt_valid = ath12k_hw_wcn7850_rx_desc_encrypt_valid,
.rx_desc_get_encrypt_type = ath12k_hw_wcn7850_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath12k_hw_wcn7850_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath12k_hw_wcn7850_rx_desc_get_mesh_ctl,
.rx_desc_get_mpdu_seq_ctl_vld = ath12k_hw_wcn7850_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath12k_hw_wcn7850_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath12k_hw_wcn7850_rx_desc_get_mpdu_start_seq_no,
.rx_desc_get_msdu_len = ath12k_hw_wcn7850_rx_desc_get_msdu_len,
.rx_desc_get_msdu_sgi = ath12k_hw_wcn7850_rx_desc_get_msdu_sgi,
.rx_desc_get_msdu_rate_mcs = ath12k_hw_wcn7850_rx_desc_get_msdu_rate_mcs,
.rx_desc_get_msdu_rx_bw = ath12k_hw_wcn7850_rx_desc_get_msdu_rx_bw,
.rx_desc_get_msdu_freq = ath12k_hw_wcn7850_rx_desc_get_msdu_freq,
.rx_desc_get_msdu_pkt_type = ath12k_hw_wcn7850_rx_desc_get_msdu_pkt_type,
.rx_desc_get_msdu_nss = ath12k_hw_wcn7850_rx_desc_get_msdu_nss,
.rx_desc_get_mpdu_tid = ath12k_hw_wcn7850_rx_desc_get_mpdu_tid,
.rx_desc_get_mpdu_peer_id = ath12k_hw_wcn7850_rx_desc_get_mpdu_peer_id,
.rx_desc_copy_end_tlv = ath12k_hw_wcn7850_rx_desc_copy_end_tlv,
.rx_desc_get_mpdu_start_tag = ath12k_hw_wcn7850_rx_desc_get_mpdu_start_tag,
.rx_desc_get_mpdu_ppdu_id = ath12k_hw_wcn7850_rx_desc_get_mpdu_ppdu_id,
.rx_desc_set_msdu_len = ath12k_hw_wcn7850_rx_desc_set_msdu_len,
.rx_desc_get_msdu_payload = ath12k_hw_wcn7850_rx_desc_get_msdu_payload,
.rx_desc_get_mpdu_start_offset = ath12k_hw_wcn7850_rx_desc_get_mpdu_start_offset,
.rx_desc_get_msdu_end_offset = ath12k_hw_wcn7850_rx_desc_get_msdu_end_offset,
.rx_desc_mac_addr2_valid = ath12k_hw_wcn7850_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath12k_hw_wcn7850_rx_desc_mpdu_start_addr2,
.rx_desc_is_da_mcbc = ath12k_hw_wcn7850_rx_desc_is_da_mcbc,
.rx_desc_get_dot11_hdr = ath12k_hw_wcn7850_rx_desc_get_dot11_hdr,
.rx_desc_get_crypto_header = ath12k_hw_wcn7850_rx_desc_get_crypto_hdr,
.rx_desc_get_mpdu_frame_ctl = ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl,
.create_srng_config = ath12k_hal_srng_create_config_wcn7850,
.tcl_to_wbm_rbm_map = ath12k_hal_wcn7850_tcl_to_wbm_rbm_map,
.dp_rx_h_msdu_done = ath12k_hw_wcn7850_dp_rx_h_msdu_done,
.dp_rx_h_l4_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail,
.dp_rx_h_ip_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail,
.dp_rx_h_is_decrypted = ath12k_hw_wcn7850_dp_rx_h_is_decrypted,
.dp_rx_h_mpdu_err = ath12k_hw_wcn7850_dp_rx_h_mpdu_err,
};
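/* Minimal userspace sketch of the ops-table pattern used by
 * hal_qcn9274_ops and hal_wcn7850_ops above: chip-specific accessors are
 * bundled into a const struct of function pointers selected once at probe
 * time, so common code stays descriptor-agnostic. All names below are
 * hypothetical.
 */
#include <stdio.h>

struct demo_ops {
	int (*get_len)(const void *desc);
};

static int chip_a_get_len(const void *desc)
{
	return *(const int *)desc; /* chip A stores the length directly */
}

static const struct demo_ops chip_a_ops = {
	.get_len = chip_a_get_len,
};

int main(void)
{
	int fake_desc = 1500;
	const struct demo_ops *ops = &chip_a_ops; /* chosen at "probe" */

	printf("msdu len = %d\n", ops->get_len(&fake_desc));
	return 0;
}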
static int ath12k_hal_alloc_cont_rdp(struct ath12k_base *ab)
{
struct ath12k_hal *hal = &ab->hal;
size_t size;
size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr,
GFP_KERNEL);
if (!hal->rdp.vaddr)
return -ENOMEM;
return 0;
}
static void ath12k_hal_free_cont_rdp(struct ath12k_base *ab)
{
struct ath12k_hal *hal = &ab->hal;
size_t size;
if (!hal->rdp.vaddr)
return;
size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
dma_free_coherent(ab->dev, size,
hal->rdp.vaddr, hal->rdp.paddr);
hal->rdp.vaddr = NULL;
}
static int ath12k_hal_alloc_cont_wrp(struct ath12k_base *ab)
{
struct ath12k_hal *hal = &ab->hal;
size_t size;
size = sizeof(u32) * (HAL_SRNG_NUM_PMAC_RINGS + HAL_SRNG_NUM_DMAC_RINGS);
hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr,
GFP_KERNEL);
if (!hal->wrp.vaddr)
return -ENOMEM;
return 0;
}
static void ath12k_hal_free_cont_wrp(struct ath12k_base *ab)
{
struct ath12k_hal *hal = &ab->hal;
size_t size;
if (!hal->wrp.vaddr)
return;
size = sizeof(u32) * (HAL_SRNG_NUM_PMAC_RINGS + HAL_SRNG_NUM_DMAC_RINGS);
dma_free_coherent(ab->dev, size,
hal->wrp.vaddr, hal->wrp.paddr);
hal->wrp.vaddr = NULL;
}
static void ath12k_hal_ce_dst_setup(struct ath12k_base *ab,
struct hal_srng *srng, int ring_num)
{
struct hal_srng_config *srng_config = &ab->hal.srng_config[HAL_CE_DST];
u32 addr;
u32 val;
addr = HAL_CE_DST_RING_CTRL +
srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
val = ath12k_hif_read32(ab, addr);
val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
val |= u32_encode_bits(srng->u.dst_ring.max_buffer_length,
HAL_CE_DST_R0_DEST_CTRL_MAX_LEN);
ath12k_hif_write32(ab, addr, val);
}
static void ath12k_hal_srng_dst_hw_init(struct ath12k_base *ab,
struct hal_srng *srng)
{
struct ath12k_hal *hal = &ab->hal;
u32 val;
u64 hp_addr;
u32 reg_base;
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
ath12k_hif_write32(ab, reg_base +
ath12k_hal_reo1_ring_msi1_base_lsb_offset(ab),
srng->msi_addr);
val = u32_encode_bits(((u64)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT),
HAL_REO1_RING_MSI1_BASE_MSB_ADDR) |
HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
ath12k_hif_write32(ab, reg_base +
ath12k_hal_reo1_ring_msi1_base_msb_offset(ab), val);
ath12k_hif_write32(ab,
reg_base + ath12k_hal_reo1_ring_msi1_data_offset(ab),
srng->msi_data);
}
ath12k_hif_write32(ab, reg_base, srng->ring_base_paddr);
val = u32_encode_bits(((u64)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT),
HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB) |
u32_encode_bits((srng->entry_size * srng->num_entries),
HAL_REO1_RING_BASE_MSB_RING_SIZE);
ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_base_msb_offset(ab), val);
val = u32_encode_bits(srng->ring_id, HAL_REO1_RING_ID_RING_ID) |
u32_encode_bits(srng->entry_size, HAL_REO1_RING_ID_ENTRY_SIZE);
ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_id_offset(ab), val);
/* interrupt setup */
val = u32_encode_bits((srng->intr_timer_thres_us >> 3),
HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD);
val |= u32_encode_bits((srng->intr_batch_cntr_thres_entries * srng->entry_size),
HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD);
ath12k_hif_write32(ab,
reg_base + ath12k_hal_reo1_ring_producer_int_setup_offset(ab),
val);
hp_addr = hal->rdp.paddr +
((unsigned long)srng->u.dst_ring.hp_addr -
(unsigned long)hal->rdp.vaddr);
ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_hp_addr_lsb_offset(ab),
hp_addr & HAL_ADDR_LSB_REG_MASK);
ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_hp_addr_msb_offset(ab),
hp_addr >> HAL_ADDR_MSB_REG_SHIFT);
/* Initialize head and tail pointers to indicate ring is empty */
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
ath12k_hif_write32(ab, reg_base, 0);
ath12k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET, 0);
*srng->u.dst_ring.hp_addr = 0;
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
val = 0;
if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
val |= HAL_REO1_RING_MISC_MSI_SWAP;
val |= HAL_REO1_RING_MISC_SRNG_ENABLE;
ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_misc_offset(ab), val);
}
static void ath12k_hal_srng_src_hw_init(struct ath12k_base *ab,
struct hal_srng *srng)
{
struct ath12k_hal *hal = &ab->hal;
u32 val;
u64 tp_addr;
u32 reg_base;
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
ath12k_hif_write32(ab, reg_base +
HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab),
srng->msi_addr);
val = u32_encode_bits(((u64)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT),
HAL_TCL1_RING_MSI1_BASE_MSB_ADDR) |
HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
ath12k_hif_write32(ab, reg_base +
HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab),
val);
ath12k_hif_write32(ab, reg_base +
HAL_TCL1_RING_MSI1_DATA_OFFSET(ab),
srng->msi_data);
}
ath12k_hif_write32(ab, reg_base, srng->ring_base_paddr);
val = u32_encode_bits(((u64)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT),
HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB) |
u32_encode_bits((srng->entry_size * srng->num_entries),
HAL_TCL1_RING_BASE_MSB_RING_SIZE);
ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET, val);
val = u32_encode_bits(srng->entry_size, HAL_REO1_RING_ID_ENTRY_SIZE);
ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val);
val = u32_encode_bits(srng->intr_timer_thres_us,
HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD);
val |= u32_encode_bits((srng->intr_batch_cntr_thres_entries * srng->entry_size),
HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD);
ath12k_hif_write32(ab,
reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab),
val);
val = 0;
if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
val |= u32_encode_bits(srng->u.src_ring.low_threshold,
HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD);
}
ath12k_hif_write32(ab,
reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab),
val);
if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
tp_addr = hal->rdp.paddr +
((unsigned long)srng->u.src_ring.tp_addr -
(unsigned long)hal->rdp.vaddr);
ath12k_hif_write32(ab,
reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab),
tp_addr & HAL_ADDR_LSB_REG_MASK);
ath12k_hif_write32(ab,
reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab),
tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
}
/* Initialize head and tail pointers to indicate ring is empty */
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
ath12k_hif_write32(ab, reg_base, 0);
ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
*srng->u.src_ring.tp_addr = 0;
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
val = 0;
if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
val |= HAL_TCL1_RING_MISC_MSI_SWAP;
/* Loop count is not used for SRC rings */
val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;
val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;
if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK)
val |= HAL_TCL1_RING_MISC_MSI_RING_ID_DISABLE;
ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(ab), val);
}
static void ath12k_hal_srng_hw_init(struct ath12k_base *ab,
struct hal_srng *srng)
{
if (srng->ring_dir == HAL_SRNG_DIR_SRC)
ath12k_hal_srng_src_hw_init(ab, srng);
else
ath12k_hal_srng_dst_hw_init(ab, srng);
}
static int ath12k_hal_srng_get_ring_id(struct ath12k_base *ab,
enum hal_ring_type type,
int ring_num, int mac_id)
{
struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
int ring_id;
if (ring_num >= srng_config->max_rings) {
ath12k_warn(ab, "invalid ring number :%d\n", ring_num);
return -EINVAL;
}
ring_id = srng_config->start_ring_id + ring_num;
if (srng_config->mac_type == ATH12K_HAL_SRNG_PMAC)
ring_id += mac_id * HAL_SRNG_RINGS_PER_PMAC;
if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX))
return -EINVAL;
return ring_id;
}
int ath12k_hal_srng_get_entrysize(struct ath12k_base *ab, u32 ring_type)
{
struct hal_srng_config *srng_config;
if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
return -EINVAL;
srng_config = &ab->hal.srng_config[ring_type];
return (srng_config->entry_size << 2);
}
int ath12k_hal_srng_get_max_entries(struct ath12k_base *ab, u32 ring_type)
{
struct hal_srng_config *srng_config;
if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
return -EINVAL;
srng_config = &ab->hal.srng_config[ring_type];
return (srng_config->max_size / srng_config->entry_size);
}
void ath12k_hal_srng_get_params(struct ath12k_base *ab, struct hal_srng *srng,
struct hal_srng_params *params)
{
params->ring_base_paddr = srng->ring_base_paddr;
params->ring_base_vaddr = srng->ring_base_vaddr;
params->num_entries = srng->num_entries;
params->intr_timer_thres_us = srng->intr_timer_thres_us;
params->intr_batch_cntr_thres_entries =
srng->intr_batch_cntr_thres_entries;
params->low_threshold = srng->u.src_ring.low_threshold;
params->msi_addr = srng->msi_addr;
params->msi2_addr = srng->msi2_addr;
params->msi_data = srng->msi_data;
params->msi2_data = srng->msi2_data;
params->flags = srng->flags;
}
dma_addr_t ath12k_hal_srng_get_hp_addr(struct ath12k_base *ab,
struct hal_srng *srng)
{
if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
return 0;
if (srng->ring_dir == HAL_SRNG_DIR_SRC)
return ab->hal.wrp.paddr +
((unsigned long)srng->u.src_ring.hp_addr -
(unsigned long)ab->hal.wrp.vaddr);
else
return ab->hal.rdp.paddr +
((unsigned long)srng->u.dst_ring.hp_addr -
(unsigned long)ab->hal.rdp.vaddr);
}
dma_addr_t ath12k_hal_srng_get_tp_addr(struct ath12k_base *ab,
struct hal_srng *srng)
{
if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
return 0;
if (srng->ring_dir == HAL_SRNG_DIR_SRC)
return ab->hal.rdp.paddr +
((unsigned long)srng->u.src_ring.tp_addr -
(unsigned long)ab->hal.rdp.vaddr);
else
return ab->hal.wrp.paddr +
((unsigned long)srng->u.dst_ring.tp_addr -
(unsigned long)ab->hal.wrp.vaddr);
}
u32 ath12k_hal_ce_get_desc_size(enum hal_ce_desc type)
{
switch (type) {
case HAL_CE_DESC_SRC:
return sizeof(struct hal_ce_srng_src_desc);
case HAL_CE_DESC_DST:
return sizeof(struct hal_ce_srng_dest_desc);
case HAL_CE_DESC_DST_STATUS:
return sizeof(struct hal_ce_srng_dst_status_desc);
}
return 0;
}
void ath12k_hal_ce_src_set_desc(struct hal_ce_srng_src_desc *desc, dma_addr_t paddr,
u32 len, u32 id, u8 byte_swap_data)
{
desc->buffer_addr_low = cpu_to_le32(paddr & HAL_ADDR_LSB_REG_MASK);
desc->buffer_addr_info =
le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT),
HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI) |
le32_encode_bits(byte_swap_data,
HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP) |
le32_encode_bits(0, HAL_CE_SRC_DESC_ADDR_INFO_GATHER) |
le32_encode_bits(len, HAL_CE_SRC_DESC_ADDR_INFO_LEN);
desc->meta_info = le32_encode_bits(id, HAL_CE_SRC_DESC_META_INFO_DATA);
}
void ath12k_hal_ce_dst_set_desc(struct hal_ce_srng_dest_desc *desc, dma_addr_t paddr)
{
desc->buffer_addr_low = cpu_to_le32(paddr & HAL_ADDR_LSB_REG_MASK);
desc->buffer_addr_info =
le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT),
HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI);
}
u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc)
{
u32 len;
len = le32_get_bits(desc->flags, HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
desc->flags &= ~cpu_to_le32(HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
return len;
}
void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
dma_addr_t paddr)
{
desc->buf_addr_info.info0 = le32_encode_bits((paddr & HAL_ADDR_LSB_REG_MASK),
BUFFER_ADDR_INFO0_ADDR);
desc->buf_addr_info.info1 =
le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT),
BUFFER_ADDR_INFO1_ADDR) |
le32_encode_bits(1, BUFFER_ADDR_INFO1_RET_BUF_MGR) |
le32_encode_bits(cookie, BUFFER_ADDR_INFO1_SW_COOKIE);
}
void *ath12k_hal_srng_dst_peek(struct ath12k_base *ab, struct hal_srng *srng)
{
lockdep_assert_held(&srng->lock);
if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
return (srng->ring_base_vaddr + srng->u.dst_ring.tp);
return NULL;
}
void *ath12k_hal_srng_dst_get_next_entry(struct ath12k_base *ab,
struct hal_srng *srng)
{
void *desc;
lockdep_assert_held(&srng->lock);
if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
return NULL;
desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
srng->ring_size;
return desc;
}
int ath12k_hal_srng_dst_num_free(struct ath12k_base *ab, struct hal_srng *srng,
bool sync_hw_ptr)
{
u32 tp, hp;
lockdep_assert_held(&srng->lock);
tp = srng->u.dst_ring.tp;
if (sync_hw_ptr) {
hp = *srng->u.dst_ring.hp_addr;
srng->u.dst_ring.cached_hp = hp;
} else {
hp = srng->u.dst_ring.cached_hp;
}
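/* hp and tp are ring offsets in 4-byte units; when hp has wrapped past
 * the end of the ring it can be numerically smaller than tp.
 */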
if (hp >= tp)
return (hp - tp) / srng->entry_size;
else
return (srng->ring_size - tp + hp) / srng->entry_size;
}
/* Returns number of available entries in src ring */
int ath12k_hal_srng_src_num_free(struct ath12k_base *ab, struct hal_srng *srng,
bool sync_hw_ptr)
{
u32 tp, hp;
lockdep_assert_held(&srng->lock);
hp = srng->u.src_ring.hp;
if (sync_hw_ptr) {
tp = *srng->u.src_ring.tp_addr;
srng->u.src_ring.cached_tp = tp;
} else {
tp = srng->u.src_ring.cached_tp;
}
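/* One entry is deliberately kept unused so a full ring can be told apart
 * from an empty one (hp == tp means empty), hence the "- 1" below.
 */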
if (tp > hp)
return ((tp - hp) / srng->entry_size) - 1;
else
return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
}
void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab,
struct hal_srng *srng)
{
void *desc;
u32 next_hp;
lockdep_assert_held(&srng->lock);
/* TODO: Using % is expensive, but we have to do this since the size of
 * some SRNG rings is not a power of 2 (due to descriptor sizes). Need to
 * see if a separate function can be defined for rings with a power of 2
 * ring size (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that the overhead
 * of % can be avoided by using a mask (with &); see the illustrative
 * sketch after this function.
 */
next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
if (next_hp == srng->u.src_ring.cached_tp)
return NULL;
desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
srng->u.src_ring.hp = next_hp;
/* TODO: Reap functionality is not used by all rings. If particular
* ring does not use reap functionality, we need not update reap_hp
* with next_hp pointer. Need to make sure a separate function is used
* before doing any optimization by removing below code updating
* reap_hp.
*/
srng->u.src_ring.reap_hp = next_hp;
return desc;
}
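/* Illustrative sketch (not part of the driver): for a ring whose size is a
 * power of 2, the modulo in the TODO above could be replaced with a mask.
 * The helper name is hypothetical.
 */
static inline u32 example_srng_next_ptr_pow2(u32 ptr, u32 entry_size,
u32 ring_size)
{
/* x & (ring_size - 1) equals x % ring_size whenever ring_size is a
 * power of 2
 */
return (ptr + entry_size) & (ring_size - 1);
}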
void *ath12k_hal_srng_src_reap_next(struct ath12k_base *ab,
struct hal_srng *srng)
{
void *desc;
u32 next_reap_hp;
lockdep_assert_held(&srng->lock);
next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
srng->ring_size;
if (next_reap_hp == srng->u.src_ring.cached_tp)
return NULL;
desc = srng->ring_base_vaddr + next_reap_hp;
srng->u.src_ring.reap_hp = next_reap_hp;
return desc;
}
void *ath12k_hal_srng_src_get_next_reaped(struct ath12k_base *ab,
struct hal_srng *srng)
{
void *desc;
lockdep_assert_held(&srng->lock);
if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
return NULL;
desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
srng->ring_size;
return desc;
}
void ath12k_hal_srng_access_begin(struct ath12k_base *ab, struct hal_srng *srng)
{
lockdep_assert_held(&srng->lock);
if (srng->ring_dir == HAL_SRNG_DIR_SRC)
srng->u.src_ring.cached_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
else
srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
}
/* Update cached ring head/tail pointers to HW. ath12k_hal_srng_access_begin()
* should have been called before this.
*/
void ath12k_hal_srng_access_end(struct ath12k_base *ab, struct hal_srng *srng)
{
lockdep_assert_held(&srng->lock);
/* TODO: See if we need a write memory barrier here */
if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
/* For LMAC rings, ring pointer updates are done through FW and
* hence written to a shared memory location that is read by FW
*/
if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
srng->u.src_ring.last_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
*srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
} else {
srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
*srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
}
} else {
if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
srng->u.src_ring.last_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
ath12k_hif_write32(ab,
(unsigned long)srng->u.src_ring.hp_addr -
(unsigned long)ab->mem,
srng->u.src_ring.hp);
} else {
srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
ath12k_hif_write32(ab,
(unsigned long)srng->u.dst_ring.tp_addr -
(unsigned long)ab->mem,
srng->u.dst_ring.tp);
}
}
srng->timestamp = jiffies;
}
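/* Typical SRNG access pattern (illustrative sketch only; descriptor
 * processing and error handling are elided). The begin/end pair brackets
 * all ring pointer accesses under srng->lock.
 */
static void example_drain_dst_ring(struct ath12k_base *ab, struct hal_srng *srng)
{
void *desc;
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng)))
(void)desc; /* process the descriptor here */
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
}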
void ath12k_hal_setup_link_idle_list(struct ath12k_base *ab,
struct hal_wbm_idle_scatter_list *sbuf,
u32 nsbufs, u32 tot_link_desc,
u32 end_offset)
{
struct ath12k_buffer_addr *link_addr;
int i;
u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;
u32 val;
link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;
for (i = 1; i < nsbufs; i++) {
link_addr->info0 = cpu_to_le32(sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK);
link_addr->info1 =
le32_encode_bits((u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT,
HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32) |
le32_encode_bits(BASE_ADDR_MATCH_TAG_VAL,
HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG);
link_addr = (void *)sbuf[i].vaddr +
HAL_WBM_IDLE_SCATTER_BUF_SIZE;
}
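/* Each scatter buffer's trailing slot now points at the next buffer in the
 * list, forming a chain that the WBM hardware walks.
 */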
val = u32_encode_bits(reg_scatter_buf_sz, HAL_WBM_SCATTER_BUFFER_SIZE) |
u32_encode_bits(0x1, HAL_WBM_LINK_DESC_IDLE_LIST_MODE);
ath12k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR(ab),
val);
val = u32_encode_bits(reg_scatter_buf_sz * nsbufs,
HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST);
ath12k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR(ab),
val);
val = u32_encode_bits(sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK,
BUFFER_ADDR_INFO0_ADDR);
ath12k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_RING_BASE_LSB(ab),
val);
val = u32_encode_bits(BASE_ADDR_MATCH_TAG_VAL,
HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG) |
u32_encode_bits((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT,
HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32);
ath12k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_RING_BASE_MSB(ab),
val);
/* Setup head and tail pointers for the idle list */
val = u32_encode_bits(sbuf[nsbufs - 1].paddr, BUFFER_ADDR_INFO0_ADDR);
ath12k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0(ab),
val);
val = u32_encode_bits(((u64)sbuf[nsbufs - 1].paddr >> HAL_ADDR_MSB_REG_SHIFT),
HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32) |
u32_encode_bits((end_offset >> 2),
HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1);
ath12k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1(ab),
val);
val = u32_encode_bits(sbuf[0].paddr, BUFFER_ADDR_INFO0_ADDR);
ath12k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0(ab),
val);
val = u32_encode_bits(sbuf[0].paddr, BUFFER_ADDR_INFO0_ADDR);
ath12k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0(ab),
val);
val = u32_encode_bits(((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT),
HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32) |
u32_encode_bits(0, HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1);
ath12k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1(ab),
val);
val = 2 * tot_link_desc;
ath12k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR(ab),
val);
/* Enable the SRNG */
val = u32_encode_bits(1, HAL_WBM_IDLE_LINK_RING_MISC_SRNG_ENABLE) |
u32_encode_bits(1, HAL_WBM_IDLE_LINK_RING_MISC_RIND_ID_DISABLE);
ath12k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_WBM_REG +
HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab),
val);
}
int ath12k_hal_srng_setup(struct ath12k_base *ab, enum hal_ring_type type,
int ring_num, int mac_id,
struct hal_srng_params *params)
{
struct ath12k_hal *hal = &ab->hal;
struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
struct hal_srng *srng;
int ring_id;
u32 idx;
int i;
u32 reg_base;
ring_id = ath12k_hal_srng_get_ring_id(ab, type, ring_num, mac_id);
if (ring_id < 0)
return ring_id;
srng = &hal->srng_list[ring_id];
srng->ring_id = ring_id;
srng->ring_dir = srng_config->ring_dir;
srng->ring_base_paddr = params->ring_base_paddr;
srng->ring_base_vaddr = params->ring_base_vaddr;
srng->entry_size = srng_config->entry_size;
srng->num_entries = params->num_entries;
srng->ring_size = srng->entry_size * srng->num_entries;
srng->intr_batch_cntr_thres_entries =
params->intr_batch_cntr_thres_entries;
srng->intr_timer_thres_us = params->intr_timer_thres_us;
srng->flags = params->flags;
srng->msi_addr = params->msi_addr;
srng->msi2_addr = params->msi2_addr;
srng->msi_data = params->msi_data;
srng->msi2_data = params->msi2_data;
srng->initialized = 1;
spin_lock_init(&srng->lock);
lockdep_set_class(&srng->lock, &srng->lock_key);
for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
srng->hwreg_base[i] = srng_config->reg_start[i] +
(ring_num * srng_config->reg_size[i]);
}
memset(srng->ring_base_vaddr, 0,
(srng->entry_size * srng->num_entries) << 2);
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
srng->u.src_ring.hp = 0;
srng->u.src_ring.cached_tp = 0;
srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
srng->u.src_ring.low_threshold = params->low_threshold *
srng->entry_size;
if (srng_config->mac_type == ATH12K_HAL_SRNG_UMAC) {
if (!ab->hw_params->supports_shadow_regs)
srng->u.src_ring.hp_addr =
(u32 *)((unsigned long)ab->mem + reg_base);
else
ath12k_dbg(ab, ATH12K_DBG_HAL,
"hal type %d ring_num %d reg_base 0x%x shadow 0x%lx\n",
type, ring_num,
reg_base,
(unsigned long)srng->u.src_ring.hp_addr -
(unsigned long)ab->mem);
} else {
idx = ring_id - HAL_SRNG_RING_ID_DMAC_CMN_ID_START;
srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
idx);
srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
}
} else {
/* During initialization the loop count in all descriptors is set to
 * zero. HW sets it to 1 on completing the descriptor update in the
 * first loop and increments it by 1 on each subsequent loop (the loop
 * count wraps around after reaching 0xffff). The 'loop_cnt' in the SW
 * ring state is the expected loop count in descriptors updated by HW
 * (to be processed by SW).
 */
srng->u.dst_ring.loop_cnt = 1;
srng->u.dst_ring.tp = 0;
srng->u.dst_ring.cached_hp = 0;
srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
if (srng_config->mac_type == ATH12K_HAL_SRNG_UMAC) {
if (!ab->hw_params->supports_shadow_regs)
srng->u.dst_ring.tp_addr =
(u32 *)((unsigned long)ab->mem + reg_base +
(HAL_REO1_RING_TP - HAL_REO1_RING_HP));
else
ath12k_dbg(ab, ATH12K_DBG_HAL,
"type %d ring_num %d target_reg 0x%x shadow 0x%lx\n",
type, ring_num,
reg_base + HAL_REO1_RING_TP - HAL_REO1_RING_HP,
(unsigned long)srng->u.dst_ring.tp_addr -
(unsigned long)ab->mem);
} else {
/* For PMAC & DMAC rings, tail pointer updates will be done
* through FW by writing to a shared memory location
*/
idx = ring_id - HAL_SRNG_RING_ID_DMAC_CMN_ID_START;
srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
idx);
srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
}
}
if (srng_config->mac_type != ATH12K_HAL_SRNG_UMAC)
return ring_id;
ath12k_hal_srng_hw_init(ab, srng);
if (type == HAL_CE_DST) {
srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
ath12k_hal_ce_dst_setup(ab, srng, ring_num);
}
return ring_id;
}
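/* Minimal usage sketch (hypothetical values, error handling elided): a
 * caller fills hal_srng_params with a DMA-coherent ring buffer and
 * interrupt thresholds, then requests ring 0 of the desired type. The
 * threshold values here are illustrative only.
 */
static int example_setup_reo_ring(struct ath12k_base *ab, void *vaddr,
dma_addr_t paddr, int num_entries)
{
struct hal_srng_params params = {
.ring_base_vaddr = vaddr,
.ring_base_paddr = paddr,
.num_entries = num_entries,
.intr_timer_thres_us = 500,
.intr_batch_cntr_thres_entries = 8,
};
/* returns the assigned ring id, or a negative error code */
return ath12k_hal_srng_setup(ab, HAL_REO_DST, 0, 0, &params);
}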
static void ath12k_hal_srng_update_hp_tp_addr(struct ath12k_base *ab,
int shadow_cfg_idx,
enum hal_ring_type ring_type,
int ring_num)
{
struct hal_srng *srng;
struct ath12k_hal *hal = &ab->hal;
int ring_id;
struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
ring_id = ath12k_hal_srng_get_ring_id(ab, ring_type, ring_num, 0);
if (ring_id < 0)
return;
srng = &hal->srng_list[ring_id];
if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
(unsigned long)ab->mem);
else
srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
(unsigned long)ab->mem);
}
int ath12k_hal_srng_update_shadow_config(struct ath12k_base *ab,
enum hal_ring_type ring_type,
int ring_num)
{
struct ath12k_hal *hal = &ab->hal;
struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
int shadow_cfg_idx = hal->num_shadow_reg_configured;
u32 target_reg;
if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
return -EINVAL;
hal->num_shadow_reg_configured++;
target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
ring_num;
/* For destination ring, shadow the TP */
if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
target_reg += HAL_OFFSET_FROM_HP_TO_TP;
hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;
/* update hp/tp addr in the hal structure */
ath12k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type,
ring_num);
ath12k_dbg(ab, ATH12K_DBG_HAL,
"target_reg %x, shadow reg 0x%x shadow_idx 0x%x, ring_type %d, ring num %d",
target_reg,
HAL_SHADOW_REG(shadow_cfg_idx),
shadow_cfg_idx,
ring_type, ring_num);
return 0;
}
void ath12k_hal_srng_shadow_config(struct ath12k_base *ab)
{
struct ath12k_hal *hal = &ab->hal;
int ring_type, ring_num;
/* update all the non-CE srngs. */
for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
if (ring_type == HAL_CE_SRC ||
ring_type == HAL_CE_DST ||
ring_type == HAL_CE_DST_STATUS)
continue;
if (srng_config->mac_type == ATH12K_HAL_SRNG_DMAC ||
srng_config->mac_type == ATH12K_HAL_SRNG_PMAC)
continue;
for (ring_num = 0; ring_num < srng_config->max_rings; ring_num++)
ath12k_hal_srng_update_shadow_config(ab, ring_type, ring_num);
}
}
void ath12k_hal_srng_get_shadow_config(struct ath12k_base *ab,
u32 **cfg, u32 *len)
{
struct ath12k_hal *hal = &ab->hal;
*len = hal->num_shadow_reg_configured;
*cfg = hal->shadow_reg_addr;
}
void ath12k_hal_srng_shadow_update_hp_tp(struct ath12k_base *ab,
struct hal_srng *srng)
{
lockdep_assert_held(&srng->lock);
/* Check whether the ring is empty. Update the shadow
 * HP only when the ring isn't empty.
 */
if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
*srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
ath12k_hal_srng_access_end(ab, srng);
}
static void ath12k_hal_register_srng_lock_keys(struct ath12k_base *ab)
{
struct ath12k_hal *hal = &ab->hal;
u32 ring_id;
for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
lockdep_register_key(&hal->srng_list[ring_id].lock_key);
}
static void ath12k_hal_unregister_srng_lock_keys(struct ath12k_base *ab)
{
struct ath12k_hal *hal = &ab->hal;
u32 ring_id;
for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
lockdep_unregister_key(&hal->srng_list[ring_id].lock_key);
}
int ath12k_hal_srng_init(struct ath12k_base *ab)
{
struct ath12k_hal *hal = &ab->hal;
int ret;
memset(hal, 0, sizeof(*hal));
ret = ab->hw_params->hal_ops->create_srng_config(ab);
if (ret)
goto err_hal;
ret = ath12k_hal_alloc_cont_rdp(ab);
if (ret)
goto err_hal;
ret = ath12k_hal_alloc_cont_wrp(ab);
if (ret)
goto err_free_cont_rdp;
ath12k_hal_register_srng_lock_keys(ab);
return 0;
err_free_cont_rdp:
ath12k_hal_free_cont_rdp(ab);
err_hal:
return ret;
}
void ath12k_hal_srng_deinit(struct ath12k_base *ab)
{
struct ath12k_hal *hal = &ab->hal;
ath12k_hal_unregister_srng_lock_keys(ab);
ath12k_hal_free_cont_rdp(ab);
ath12k_hal_free_cont_wrp(ab);
kfree(hal->srng_config);
hal->srng_config = NULL;
}
void ath12k_hal_dump_srng_stats(struct ath12k_base *ab)
{
struct hal_srng *srng;
struct ath12k_ext_irq_grp *irq_grp;
struct ath12k_ce_pipe *ce_pipe;
int i;
ath12k_err(ab, "Last interrupt received for each CE:\n");
for (i = 0; i < ab->hw_params->ce_count; i++) {
ce_pipe = &ab->ce.ce_pipe[i];
if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
ath12k_err(ab, "CE_id %d pipe_num %d %ums before\n",
i, ce_pipe->pipe_num,
jiffies_to_msecs(jiffies - ce_pipe->timestamp));
}
ath12k_err(ab, "\nLast interrupt received for each group:\n");
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
irq_grp = &ab->ext_irq_grp[i];
ath12k_err(ab, "group_id %d %ums before\n",
irq_grp->grp_id,
jiffies_to_msecs(jiffies - irq_grp->timestamp));
}
for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) {
srng = &ab->hal.srng_list[i];
if (!srng->initialized)
continue;
if (srng->ring_dir == HAL_SRNG_DIR_SRC)
ath12k_err(ab,
"src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n",
srng->ring_id, srng->u.src_ring.hp,
srng->u.src_ring.reap_hp,
*srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp,
srng->u.src_ring.last_tp,
jiffies_to_msecs(jiffies - srng->timestamp));
else if (srng->ring_dir == HAL_SRNG_DIR_DST)
ath12k_err(ab,
"dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n",
srng->ring_id, srng->u.dst_ring.tp,
*srng->u.dst_ring.hp_addr,
srng->u.dst_ring.cached_hp,
srng->u.dst_ring.last_hp,
jiffies_to_msecs(jiffies - srng->timestamp));
}
}
|
linux-master
|
drivers/net/wireless/ath/ath12k/hal.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include "debug.h"
#include "hif.h"
struct sk_buff *ath12k_htc_alloc_skb(struct ath12k_base *ab, int size)
{
struct sk_buff *skb;
skb = dev_alloc_skb(size + sizeof(struct ath12k_htc_hdr));
if (!skb)
return NULL;
skb_reserve(skb, sizeof(struct ath12k_htc_hdr));
/* FW/HTC requires 4-byte aligned streams */
if (!IS_ALIGNED((unsigned long)skb->data, 4))
ath12k_warn(ab, "Unaligned HTC tx skb\n");
return skb;
}
static void ath12k_htc_control_tx_complete(struct ath12k_base *ab,
struct sk_buff *skb)
{
kfree_skb(skb);
}
static struct sk_buff *ath12k_htc_build_tx_ctrl_skb(void)
{
struct sk_buff *skb;
struct ath12k_skb_cb *skb_cb;
skb = dev_alloc_skb(ATH12K_HTC_CONTROL_BUFFER_SIZE);
if (!skb)
return NULL;
skb_reserve(skb, sizeof(struct ath12k_htc_hdr));
WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
skb_cb = ATH12K_SKB_CB(skb);
memset(skb_cb, 0, sizeof(*skb_cb));
return skb;
}
static void ath12k_htc_prepare_tx_skb(struct ath12k_htc_ep *ep,
struct sk_buff *skb)
{
struct ath12k_htc_hdr *hdr;
hdr = (struct ath12k_htc_hdr *)skb->data;
memset(hdr, 0, sizeof(*hdr));
hdr->htc_info = le32_encode_bits(ep->eid, HTC_HDR_ENDPOINTID) |
le32_encode_bits((skb->len - sizeof(*hdr)),
HTC_HDR_PAYLOADLEN);
if (ep->tx_credit_flow_enabled)
hdr->htc_info |= le32_encode_bits(ATH12K_HTC_FLAG_NEED_CREDIT_UPDATE,
HTC_HDR_FLAGS);
spin_lock_bh(&ep->htc->tx_lock);
hdr->ctrl_info = le32_encode_bits(ep->seq_no++, HTC_HDR_CONTROLBYTES1);
spin_unlock_bh(&ep->htc->tx_lock);
}
int ath12k_htc_send(struct ath12k_htc *htc,
enum ath12k_htc_ep_id eid,
struct sk_buff *skb)
{
struct ath12k_htc_ep *ep = &htc->endpoint[eid];
struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
struct device *dev = htc->ab->dev;
struct ath12k_base *ab = htc->ab;
int credits = 0;
int ret;
if (eid >= ATH12K_HTC_EP_COUNT) {
ath12k_warn(ab, "Invalid endpoint id: %d\n", eid);
return -ENOENT;
}
skb_push(skb, sizeof(struct ath12k_htc_hdr));
if (ep->tx_credit_flow_enabled) {
credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
spin_lock_bh(&htc->tx_lock);
if (ep->tx_credits < credits) {
ath12k_dbg(ab, ATH12K_DBG_HTC,
"htc insufficient credits ep %d required %d available %d\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
ret = -EAGAIN;
goto err_pull;
}
ep->tx_credits -= credits;
ath12k_dbg(ab, ATH12K_DBG_HTC,
"htc ep %d consumed %d credits (total %d)\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
}
ath12k_htc_prepare_tx_skb(ep, skb);
skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
ret = dma_mapping_error(dev, skb_cb->paddr);
if (ret) {
ret = -EIO;
goto err_credits;
}
ret = ath12k_ce_send(htc->ab, skb, ep->ul_pipe_id, ep->eid);
if (ret)
goto err_unmap;
return 0;
err_unmap:
dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
if (ep->tx_credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
ath12k_dbg(ab, ATH12K_DBG_HTC,
"htc ep %d reverted %d credits back (total %d)\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
if (ep->ep_ops.ep_tx_credits)
ep->ep_ops.ep_tx_credits(htc->ab);
}
err_pull:
skb_pull(skb, sizeof(struct ath12k_htc_hdr));
return ret;
}
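/* Usage sketch (hypothetical payload): allocate a tx skb with HTC headroom,
 * append the payload and hand it to ath12k_htc_send(). The eid must come
 * from a prior ath12k_htc_connect_service(); on failure the caller still
 * owns the skb.
 */
static int example_htc_send(struct ath12k_base *ab, enum ath12k_htc_ep_id eid,
const void *payload, int len)
{
struct sk_buff *skb;
int ret;
skb = ath12k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
memcpy(skb_put(skb, len), payload, len);
ret = ath12k_htc_send(&ab->htc, eid, skb);
if (ret)
kfree_skb(skb);
return ret;
}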
static void
ath12k_htc_process_credit_report(struct ath12k_htc *htc,
const struct ath12k_htc_credit_report *report,
int len,
enum ath12k_htc_ep_id eid)
{
struct ath12k_base *ab = htc->ab;
struct ath12k_htc_ep *ep;
int i, n_reports;
if (len % sizeof(*report))
ath12k_warn(ab, "Uneven credit report len %d", len);
n_reports = len / sizeof(*report);
spin_lock_bh(&htc->tx_lock);
for (i = 0; i < n_reports; i++, report++) {
if (report->eid >= ATH12K_HTC_EP_COUNT)
break;
ep = &htc->endpoint[report->eid];
ep->tx_credits += report->credits;
ath12k_dbg(ab, ATH12K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
report->eid, report->credits, ep->tx_credits);
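/* the credit callback may queue more tx and retake tx_lock, so it
 * is invoked with the lock dropped to avoid self-deadlock
 */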
if (ep->ep_ops.ep_tx_credits) {
spin_unlock_bh(&htc->tx_lock);
ep->ep_ops.ep_tx_credits(htc->ab);
spin_lock_bh(&htc->tx_lock);
}
}
spin_unlock_bh(&htc->tx_lock);
}
static int ath12k_htc_process_trailer(struct ath12k_htc *htc,
u8 *buffer,
int length,
enum ath12k_htc_ep_id src_eid)
{
struct ath12k_base *ab = htc->ab;
int status = 0;
struct ath12k_htc_record *record;
size_t len;
while (length > 0) {
record = (struct ath12k_htc_record *)buffer;
if (length < sizeof(record->hdr)) {
status = -EINVAL;
break;
}
if (record->hdr.len > length) {
/* no room left in buffer for record */
ath12k_warn(ab, "Invalid record length: %d\n",
record->hdr.len);
status = -EINVAL;
break;
}
switch (record->hdr.id) {
case ATH12K_HTC_RECORD_CREDITS:
len = sizeof(struct ath12k_htc_credit_report);
if (record->hdr.len < len) {
ath12k_warn(ab, "Credit report too long\n");
status = -EINVAL;
break;
}
ath12k_htc_process_credit_report(htc,
record->credit_report,
record->hdr.len,
src_eid);
break;
default:
ath12k_warn(ab, "Unhandled record: id:%d length:%d\n",
record->hdr.id, record->hdr.len);
break;
}
if (status)
break;
/* multiple records may be present in a trailer */
buffer += sizeof(record->hdr) + record->hdr.len;
length -= sizeof(record->hdr) + record->hdr.len;
}
return status;
}
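/* Trailer layout parsed above (informational): records are packed back to
 * back, each a fixed header followed by hdr.len bytes of payload.
 *
 *   | record hdr (id, len) | payload | record hdr (id, len) | payload | ...
 */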
static void ath12k_htc_suspend_complete(struct ath12k_base *ab, bool ack)
{
ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot suspend complete %d\n", ack);
if (ack)
set_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
else
clear_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
complete(&ab->htc_suspend);
}
void ath12k_htc_rx_completion_handler(struct ath12k_base *ab,
struct sk_buff *skb)
{
int status = 0;
struct ath12k_htc *htc = &ab->htc;
struct ath12k_htc_hdr *hdr;
struct ath12k_htc_ep *ep;
u16 payload_len;
u32 trailer_len = 0;
size_t min_len;
u8 eid;
bool trailer_present;
hdr = (struct ath12k_htc_hdr *)skb->data;
skb_pull(skb, sizeof(*hdr));
eid = le32_get_bits(hdr->htc_info, HTC_HDR_ENDPOINTID);
if (eid >= ATH12K_HTC_EP_COUNT) {
ath12k_warn(ab, "HTC Rx: invalid eid %d\n", eid);
goto out;
}
ep = &htc->endpoint[eid];
payload_len = le32_get_bits(hdr->htc_info, HTC_HDR_PAYLOADLEN);
if (payload_len + sizeof(*hdr) > ATH12K_HTC_MAX_LEN) {
ath12k_warn(ab, "HTC rx frame too long, len: %zu\n",
payload_len + sizeof(*hdr));
goto out;
}
if (skb->len < payload_len) {
ath12k_warn(ab, "HTC Rx: insufficient length, got %d, expected %d\n",
skb->len, payload_len);
goto out;
}
/* get flags to check for trailer */
trailer_present = le32_get_bits(hdr->htc_info, HTC_HDR_FLAGS) &
ATH12K_HTC_FLAG_TRAILER_PRESENT;
if (trailer_present) {
u8 *trailer;
trailer_len = le32_get_bits(hdr->ctrl_info,
HTC_HDR_CONTROLBYTES0);
min_len = sizeof(struct ath12k_htc_record_hdr);
if ((trailer_len < min_len) ||
(trailer_len > payload_len)) {
ath12k_warn(ab, "Invalid trailer length: %d\n",
trailer_len);
goto out;
}
trailer = (u8 *)hdr;
trailer += sizeof(*hdr);
trailer += payload_len;
trailer -= trailer_len;
status = ath12k_htc_process_trailer(htc, trailer,
trailer_len, eid);
if (status)
goto out;
skb_trim(skb, skb->len - trailer_len);
}
if (trailer_len >= payload_len)
/* zero length packet with trailer data, just drop these */
goto out;
if (eid == ATH12K_HTC_EP_0) {
struct ath12k_htc_msg *msg = (struct ath12k_htc_msg *)skb->data;
switch (le32_get_bits(msg->msg_svc_id, HTC_MSG_MESSAGEID)) {
case ATH12K_HTC_MSG_READY_ID:
case ATH12K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
/* handle HTC control message */
if (completion_done(&htc->ctl_resp)) {
/* this is a fatal error, the target should not be
 * sending unsolicited messages on ep 0
 */
ath12k_warn(ab, "HTC rx ctrl still processing\n");
complete(&htc->ctl_resp);
goto out;
}
htc->control_resp_len =
min_t(int, skb->len,
ATH12K_HTC_MAX_CTRL_MSG_LEN);
memcpy(htc->control_resp_buffer, skb->data,
htc->control_resp_len);
complete(&htc->ctl_resp);
break;
case ATH12K_HTC_MSG_SEND_SUSPEND_COMPLETE:
ath12k_htc_suspend_complete(ab, true);
break;
case ATH12K_HTC_MSG_NACK_SUSPEND:
ath12k_htc_suspend_complete(ab, false);
break;
case ATH12K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID:
break;
default:
ath12k_warn(ab, "ignoring unsolicited htc ep0 event %u\n",
le32_get_bits(msg->msg_svc_id, HTC_MSG_MESSAGEID));
break;
}
goto out;
}
ath12k_dbg(ab, ATH12K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
eid, skb);
ep->ep_ops.ep_rx_complete(ab, skb);
/* poll tx completion for interrupt disabled CE's */
ath12k_ce_poll_send_completed(ab, ep->ul_pipe_id);
/* skb is now owned by the rx completion handler */
skb = NULL;
out:
kfree_skb(skb);
}
static void ath12k_htc_control_rx_complete(struct ath12k_base *ab,
struct sk_buff *skb)
{
/* This is unexpected. FW is not supposed to send regular rx on this
* endpoint.
*/
ath12k_warn(ab, "unexpected htc rx\n");
kfree_skb(skb);
}
static const char *htc_service_name(enum ath12k_htc_svc_id id)
{
switch (id) {
case ATH12K_HTC_SVC_ID_RESERVED:
return "Reserved";
case ATH12K_HTC_SVC_ID_RSVD_CTRL:
return "Control";
case ATH12K_HTC_SVC_ID_WMI_CONTROL:
return "WMI";
case ATH12K_HTC_SVC_ID_WMI_DATA_BE:
return "DATA BE";
case ATH12K_HTC_SVC_ID_WMI_DATA_BK:
return "DATA BK";
case ATH12K_HTC_SVC_ID_WMI_DATA_VI:
return "DATA VI";
case ATH12K_HTC_SVC_ID_WMI_DATA_VO:
return "DATA VO";
case ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1:
return "WMI MAC1";
case ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2:
return "WMI MAC2";
case ATH12K_HTC_SVC_ID_NMI_CONTROL:
return "NMI Control";
case ATH12K_HTC_SVC_ID_NMI_DATA:
return "NMI Data";
case ATH12K_HTC_SVC_ID_HTT_DATA_MSG:
return "HTT Data";
case ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS:
return "RAW";
case ATH12K_HTC_SVC_ID_IPA_TX:
return "IPA TX";
case ATH12K_HTC_SVC_ID_PKT_LOG:
return "PKT LOG";
case ATH12K_HTC_SVC_ID_WMI_CONTROL_DIAG:
return "WMI DIAG";
}
return "Unknown";
}
static void ath12k_htc_reset_endpoint_states(struct ath12k_htc *htc)
{
struct ath12k_htc_ep *ep;
int i;
for (i = ATH12K_HTC_EP_0; i < ATH12K_HTC_EP_COUNT; i++) {
ep = &htc->endpoint[i];
ep->service_id = ATH12K_HTC_SVC_ID_UNUSED;
ep->max_ep_message_len = 0;
ep->max_tx_queue_depth = 0;
ep->eid = i;
ep->htc = htc;
ep->tx_credit_flow_enabled = true;
}
}
static u8 ath12k_htc_get_credit_allocation(struct ath12k_htc *htc,
u16 service_id)
{
struct ath12k_htc_svc_tx_credits *serv_entry;
u8 i, allocation = 0;
serv_entry = htc->service_alloc_table;
for (i = 0; i < ATH12K_HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) {
if (serv_entry[i].service_id == service_id) {
allocation = serv_entry[i].credit_allocation;
break;
}
}
return allocation;
}
static int ath12k_htc_setup_target_buffer_assignments(struct ath12k_htc *htc)
{
struct ath12k_htc_svc_tx_credits *serv_entry;
static const u32 svc_id[] = {
ATH12K_HTC_SVC_ID_WMI_CONTROL,
ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2,
};
int i, credits;
credits = htc->total_transmit_credits;
serv_entry = htc->service_alloc_table;
if ((htc->wmi_ep_count == 0) ||
(htc->wmi_ep_count > ARRAY_SIZE(svc_id)))
return -EINVAL;
/* Divide credits among number of endpoints for WMI */
credits = credits / htc->wmi_ep_count;
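/* any remainder from the integer division above is left unallocated */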
for (i = 0; i < htc->wmi_ep_count; i++) {
serv_entry[i].service_id = svc_id[i];
serv_entry[i].credit_allocation = credits;
}
return 0;
}
int ath12k_htc_wait_target(struct ath12k_htc *htc)
{
int i, status = 0;
struct ath12k_base *ab = htc->ab;
unsigned long time_left;
struct ath12k_htc_ready *ready;
u16 message_id;
u16 credit_count;
u16 credit_size;
time_left = wait_for_completion_timeout(&htc->ctl_resp,
ATH12K_HTC_WAIT_TIMEOUT_HZ);
if (!time_left) {
ath12k_warn(ab, "failed to receive control response completion, polling..\n");
for (i = 0; i < ab->hw_params->ce_count; i++)
ath12k_ce_per_engine_service(htc->ab, i);
time_left =
wait_for_completion_timeout(&htc->ctl_resp,
ATH12K_HTC_WAIT_TIMEOUT_HZ);
if (!time_left)
status = -ETIMEDOUT;
}
if (status < 0) {
ath12k_warn(ab, "ctl_resp never came in (%d)\n", status);
return status;
}
if (htc->control_resp_len < sizeof(*ready)) {
ath12k_warn(ab, "Invalid HTC ready msg len:%d\n",
htc->control_resp_len);
return -ECOMM;
}
ready = (struct ath12k_htc_ready *)htc->control_resp_buffer;
message_id = le32_get_bits(ready->id_credit_count, HTC_MSG_MESSAGEID);
credit_count = le32_get_bits(ready->id_credit_count,
HTC_READY_MSG_CREDITCOUNT);
credit_size = le32_get_bits(ready->size_ep, HTC_READY_MSG_CREDITSIZE);
if (message_id != ATH12K_HTC_MSG_READY_ID) {
ath12k_warn(ab, "Invalid HTC ready msg: 0x%x\n", message_id);
return -ECOMM;
}
htc->total_transmit_credits = credit_count;
htc->target_credit_size = credit_size;
ath12k_dbg(ab, ATH12K_DBG_HTC,
"Target ready! transmit resources: %d size:%d\n",
htc->total_transmit_credits, htc->target_credit_size);
if ((htc->total_transmit_credits == 0) ||
(htc->target_credit_size == 0)) {
ath12k_warn(ab, "Invalid credit size received\n");
return -ECOMM;
}
ath12k_htc_setup_target_buffer_assignments(htc);
return 0;
}
int ath12k_htc_connect_service(struct ath12k_htc *htc,
struct ath12k_htc_svc_conn_req *conn_req,
struct ath12k_htc_svc_conn_resp *conn_resp)
{
struct ath12k_base *ab = htc->ab;
struct ath12k_htc_conn_svc *req_msg;
struct ath12k_htc_conn_svc_resp resp_msg_dummy;
struct ath12k_htc_conn_svc_resp *resp_msg = &resp_msg_dummy;
enum ath12k_htc_ep_id assigned_eid = ATH12K_HTC_EP_COUNT;
struct ath12k_htc_ep *ep;
struct sk_buff *skb;
unsigned int max_msg_size = 0;
int length, status;
unsigned long time_left;
bool disable_credit_flow_ctrl = false;
u16 message_id, service_id, flags = 0;
u8 tx_alloc = 0;
/* special case for HTC pseudo control service */
if (conn_req->service_id == ATH12K_HTC_SVC_ID_RSVD_CTRL) {
disable_credit_flow_ctrl = true;
assigned_eid = ATH12K_HTC_EP_0;
max_msg_size = ATH12K_HTC_MAX_CTRL_MSG_LEN;
memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
goto setup;
}
tx_alloc = ath12k_htc_get_credit_allocation(htc,
conn_req->service_id);
if (!tx_alloc)
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"boot htc service %s does not allocate target credits\n",
htc_service_name(conn_req->service_id));
skb = ath12k_htc_build_tx_ctrl_skb();
if (!skb) {
ath12k_warn(ab, "Failed to allocate HTC packet\n");
return -ENOMEM;
}
length = sizeof(*req_msg);
skb_put(skb, length);
memset(skb->data, 0, length);
req_msg = (struct ath12k_htc_conn_svc *)skb->data;
req_msg->msg_svc_id = le32_encode_bits(ATH12K_HTC_MSG_CONNECT_SERVICE_ID,
HTC_MSG_MESSAGEID);
flags |= u32_encode_bits(tx_alloc, ATH12K_HTC_CONN_FLAGS_RECV_ALLOC);
/* Only enable credit flow control for WMI ctrl service */
if (!(conn_req->service_id == ATH12K_HTC_SVC_ID_WMI_CONTROL ||
conn_req->service_id == ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1 ||
conn_req->service_id == ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2)) {
flags |= ATH12K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
disable_credit_flow_ctrl = true;
}
req_msg->flags_len = le32_encode_bits(flags, HTC_SVC_MSG_CONNECTIONFLAGS);
req_msg->msg_svc_id |= le32_encode_bits(conn_req->service_id,
HTC_SVC_MSG_SERVICE_ID);
reinit_completion(&htc->ctl_resp);
status = ath12k_htc_send(htc, ATH12K_HTC_EP_0, skb);
if (status) {
kfree_skb(skb);
return status;
}
/* wait for response */
time_left = wait_for_completion_timeout(&htc->ctl_resp,
ATH12K_HTC_CONN_SVC_TIMEOUT_HZ);
if (!time_left) {
ath12k_err(ab, "Service connect timeout\n");
return -ETIMEDOUT;
}
/* we controlled the buffer creation, it's aligned */
resp_msg = (struct ath12k_htc_conn_svc_resp *)htc->control_resp_buffer;
message_id = le32_get_bits(resp_msg->msg_svc_id, HTC_MSG_MESSAGEID);
service_id = le32_get_bits(resp_msg->msg_svc_id,
HTC_SVC_RESP_MSG_SERVICEID);
if ((message_id != ATH12K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
(htc->control_resp_len < sizeof(*resp_msg))) {
ath12k_err(ab, "Invalid resp message ID 0x%x", message_id);
return -EPROTO;
}
ath12k_dbg(ab, ATH12K_DBG_HTC,
"HTC Service %s connect response: status: %u, assigned ep: %u\n",
htc_service_name(service_id),
le32_get_bits(resp_msg->flags_len, HTC_SVC_RESP_MSG_STATUS),
le32_get_bits(resp_msg->flags_len, HTC_SVC_RESP_MSG_ENDPOINTID));
conn_resp->connect_resp_code = le32_get_bits(resp_msg->flags_len,
HTC_SVC_RESP_MSG_STATUS);
/* check response status */
if (conn_resp->connect_resp_code != ATH12K_HTC_CONN_SVC_STATUS_SUCCESS) {
ath12k_err(ab, "HTC Service %s connect request failed: 0x%x)\n",
htc_service_name(service_id),
conn_resp->connect_resp_code);
return -EPROTO;
}
assigned_eid = le32_get_bits(resp_msg->flags_len,
HTC_SVC_RESP_MSG_ENDPOINTID);
max_msg_size = le32_get_bits(resp_msg->flags_len,
HTC_SVC_RESP_MSG_MAXMSGSIZE);
setup:
if (assigned_eid >= ATH12K_HTC_EP_COUNT)
return -EPROTO;
if (max_msg_size == 0)
return -EPROTO;
ep = &htc->endpoint[assigned_eid];
ep->eid = assigned_eid;
if (ep->service_id != ATH12K_HTC_SVC_ID_UNUSED)
return -EPROTO;
/* return assigned endpoint to caller */
conn_resp->eid = assigned_eid;
conn_resp->max_msg_len = le32_get_bits(resp_msg->flags_len,
HTC_SVC_RESP_MSG_MAXMSGSIZE);
/* setup the endpoint */
ep->service_id = conn_req->service_id;
ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
ep->max_ep_message_len = le32_get_bits(resp_msg->flags_len,
HTC_SVC_RESP_MSG_MAXMSGSIZE);
ep->tx_credits = tx_alloc;
/* copy all the callbacks */
ep->ep_ops = conn_req->ep_ops;
status = ath12k_hif_map_service_to_pipe(htc->ab,
ep->service_id,
&ep->ul_pipe_id,
&ep->dl_pipe_id);
if (status)
return status;
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
htc_service_name(ep->service_id), ep->ul_pipe_id,
ep->dl_pipe_id, ep->eid);
if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
ep->tx_credit_flow_enabled = false;
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"boot htc service '%s' eid %d TX flow control disabled\n",
htc_service_name(ep->service_id), assigned_eid);
}
return status;
}
int ath12k_htc_start(struct ath12k_htc *htc)
{
struct sk_buff *skb;
int status;
struct ath12k_base *ab = htc->ab;
struct ath12k_htc_setup_complete_extended *msg;
skb = ath12k_htc_build_tx_ctrl_skb();
if (!skb)
return -ENOMEM;
skb_put(skb, sizeof(*msg));
memset(skb->data, 0, skb->len);
msg = (struct ath12k_htc_setup_complete_extended *)skb->data;
msg->msg_id = le32_encode_bits(ATH12K_HTC_MSG_SETUP_COMPLETE_EX_ID,
HTC_MSG_MESSAGEID);
ath12k_dbg(ab, ATH12K_DBG_HTC, "HTC is using TX credit flow control\n");
status = ath12k_htc_send(htc, ATH12K_HTC_EP_0, skb);
if (status) {
kfree_skb(skb);
return status;
}
return 0;
}
int ath12k_htc_init(struct ath12k_base *ab)
{
struct ath12k_htc *htc = &ab->htc;
struct ath12k_htc_svc_conn_req conn_req = { };
struct ath12k_htc_svc_conn_resp conn_resp = { };
int ret;
spin_lock_init(&htc->tx_lock);
ath12k_htc_reset_endpoint_states(htc);
htc->ab = ab;
switch (ab->wmi_ab.preferred_hw_mode) {
case WMI_HOST_HW_MODE_SINGLE:
htc->wmi_ep_count = 1;
break;
case WMI_HOST_HW_MODE_DBS:
case WMI_HOST_HW_MODE_DBS_OR_SBS:
htc->wmi_ep_count = 2;
break;
case WMI_HOST_HW_MODE_DBS_SBS:
htc->wmi_ep_count = 3;
break;
default:
htc->wmi_ep_count = ab->hw_params->max_radios;
break;
}
/* setup our pseudo HTC control endpoint connection */
conn_req.ep_ops.ep_tx_complete = ath12k_htc_control_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath12k_htc_control_rx_complete;
conn_req.max_send_queue_depth = ATH12K_NUM_CONTROL_TX_BUFFERS;
conn_req.service_id = ATH12K_HTC_SVC_ID_RSVD_CTRL;
/* connect fake service */
ret = ath12k_htc_connect_service(htc, &conn_req, &conn_resp);
if (ret) {
ath12k_err(ab, "could not connect to htc service (%d)\n", ret);
return ret;
}
init_completion(&htc->ctl_resp);
return 0;
}
|
linux-master
|
drivers/net/wireless/ath/ath12k/htc.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/msi.h>
#include <linux/pci.h>
#include "core.h"
#include "debug.h"
#include "mhi.h"
#include "pci.h"
#define MHI_TIMEOUT_DEFAULT_MS 90000
static const struct mhi_channel_config ath12k_mhi_channels_qcn9274[] = {
{
.num = 0,
.name = "LOOPBACK",
.num_elements = 32,
.event_ring = 1,
.dir = DMA_TO_DEVICE,
.ee_mask = 0x4,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
},
{
.num = 1,
.name = "LOOPBACK",
.num_elements = 32,
.event_ring = 1,
.dir = DMA_FROM_DEVICE,
.ee_mask = 0x4,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
},
{
.num = 20,
.name = "IPCR",
.num_elements = 32,
.event_ring = 1,
.dir = DMA_TO_DEVICE,
.ee_mask = 0x4,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
},
{
.num = 21,
.name = "IPCR",
.num_elements = 32,
.event_ring = 1,
.dir = DMA_FROM_DEVICE,
.ee_mask = 0x4,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = true,
},
};
static struct mhi_event_config ath12k_mhi_events_qcn9274[] = {
{
.num_elements = 32,
.irq_moderation_ms = 0,
.irq = 1,
.data_type = MHI_ER_CTRL,
.mode = MHI_DB_BRST_DISABLE,
.hardware_event = false,
.client_managed = false,
.offload_channel = false,
},
{
.num_elements = 256,
.irq_moderation_ms = 1,
.irq = 2,
.mode = MHI_DB_BRST_DISABLE,
.priority = 1,
.hardware_event = false,
.client_managed = false,
.offload_channel = false,
},
};
const struct mhi_controller_config ath12k_mhi_config_qcn9274 = {
.max_channels = 30,
.timeout_ms = 10000,
.use_bounce_buf = false,
.buf_len = 0,
.num_channels = ARRAY_SIZE(ath12k_mhi_channels_qcn9274),
.ch_cfg = ath12k_mhi_channels_qcn9274,
.num_events = ARRAY_SIZE(ath12k_mhi_events_qcn9274),
.event_cfg = ath12k_mhi_events_qcn9274,
};
static const struct mhi_channel_config ath12k_mhi_channels_wcn7850[] = {
{
.num = 0,
.name = "LOOPBACK",
.num_elements = 32,
.event_ring = 0,
.dir = DMA_TO_DEVICE,
.ee_mask = 0x4,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
},
{
.num = 1,
.name = "LOOPBACK",
.num_elements = 32,
.event_ring = 0,
.dir = DMA_FROM_DEVICE,
.ee_mask = 0x4,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
},
{
.num = 20,
.name = "IPCR",
.num_elements = 64,
.event_ring = 1,
.dir = DMA_TO_DEVICE,
.ee_mask = 0x4,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = false,
},
{
.num = 21,
.name = "IPCR",
.num_elements = 64,
.event_ring = 1,
.dir = DMA_FROM_DEVICE,
.ee_mask = 0x4,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
.offload_channel = false,
.doorbell_mode_switch = false,
.auto_queue = true,
},
};
static struct mhi_event_config ath12k_mhi_events_wcn7850[] = {
{
.num_elements = 32,
.irq_moderation_ms = 0,
.irq = 1,
.mode = MHI_DB_BRST_DISABLE,
.data_type = MHI_ER_CTRL,
.hardware_event = false,
.client_managed = false,
.offload_channel = false,
},
{
.num_elements = 256,
.irq_moderation_ms = 1,
.irq = 2,
.mode = MHI_DB_BRST_DISABLE,
.priority = 1,
.hardware_event = false,
.client_managed = false,
.offload_channel = false,
},
};
const struct mhi_controller_config ath12k_mhi_config_wcn7850 = {
.max_channels = 128,
.timeout_ms = 2000,
.use_bounce_buf = false,
.buf_len = 0,
.num_channels = ARRAY_SIZE(ath12k_mhi_channels_wcn7850),
.ch_cfg = ath12k_mhi_channels_wcn7850,
.num_events = ARRAY_SIZE(ath12k_mhi_events_wcn7850),
.event_cfg = ath12k_mhi_events_wcn7850,
};
void ath12k_mhi_set_mhictrl_reset(struct ath12k_base *ab)
{
u32 val;
val = ath12k_pci_read32(ab, MHISTATUS);
ath12k_dbg(ab, ATH12K_DBG_PCI, "MHISTATUS 0x%x\n", val);
/* Observed on some targets that after SOC_GLOBAL_RESET, MHISTATUS has
 * the SYSERR bit set, so MHICTRL_RESET must be set to clear SYSERR.
 */
ath12k_pci_write32(ab, MHICTRL, MHICTRL_RESET_MASK);
mdelay(10);
}
static void ath12k_mhi_reset_txvecdb(struct ath12k_base *ab)
{
ath12k_pci_write32(ab, PCIE_TXVECDB, 0);
}
static void ath12k_mhi_reset_txvecstatus(struct ath12k_base *ab)
{
ath12k_pci_write32(ab, PCIE_TXVECSTATUS, 0);
}
static void ath12k_mhi_reset_rxvecdb(struct ath12k_base *ab)
{
ath12k_pci_write32(ab, PCIE_RXVECDB, 0);
}
static void ath12k_mhi_reset_rxvecstatus(struct ath12k_base *ab)
{
ath12k_pci_write32(ab, PCIE_RXVECSTATUS, 0);
}
void ath12k_mhi_clear_vector(struct ath12k_base *ab)
{
ath12k_mhi_reset_txvecdb(ab);
ath12k_mhi_reset_txvecstatus(ab);
ath12k_mhi_reset_rxvecdb(ab);
ath12k_mhi_reset_rxvecstatus(ab);
}
static int ath12k_mhi_get_msi(struct ath12k_pci *ab_pci)
{
struct ath12k_base *ab = ab_pci->ab;
u32 user_base_data, base_vector;
int ret, num_vectors, i;
int *irq;
ret = ath12k_pci_get_user_msi_assignment(ab,
"MHI", &num_vectors,
&user_base_data, &base_vector);
if (ret)
return ret;
ath12k_dbg(ab, ATH12K_DBG_PCI, "Number of assigned MSI for MHI is %d, base vector is %d\n",
num_vectors, base_vector);
irq = kcalloc(num_vectors, sizeof(*irq), GFP_KERNEL);
if (!irq)
return -ENOMEM;
for (i = 0; i < num_vectors; i++)
irq[i] = ath12k_pci_get_msi_irq(ab->dev,
base_vector + i);
ab_pci->mhi_ctrl->irq = irq;
ab_pci->mhi_ctrl->nr_irqs = num_vectors;
return 0;
}
static int ath12k_mhi_op_runtime_get(struct mhi_controller *mhi_cntrl)
{
return 0;
}
static void ath12k_mhi_op_runtime_put(struct mhi_controller *mhi_cntrl)
{
}
static char *ath12k_mhi_op_callback_to_str(enum mhi_callback reason)
{
switch (reason) {
case MHI_CB_IDLE:
return "MHI_CB_IDLE";
case MHI_CB_PENDING_DATA:
return "MHI_CB_PENDING_DATA";
case MHI_CB_LPM_ENTER:
return "MHI_CB_LPM_ENTER";
case MHI_CB_LPM_EXIT:
return "MHI_CB_LPM_EXIT";
case MHI_CB_EE_RDDM:
return "MHI_CB_EE_RDDM";
case MHI_CB_EE_MISSION_MODE:
return "MHI_CB_EE_MISSION_MODE";
case MHI_CB_SYS_ERROR:
return "MHI_CB_SYS_ERROR";
case MHI_CB_FATAL_ERROR:
return "MHI_CB_FATAL_ERROR";
case MHI_CB_BW_REQ:
return "MHI_CB_BW_REQ";
default:
return "UNKNOWN";
}
}
static void ath12k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
enum mhi_callback cb)
{
struct ath12k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "mhi notify status reason %s\n",
ath12k_mhi_op_callback_to_str(cb));
switch (cb) {
case MHI_CB_SYS_ERROR:
ath12k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n");
break;
case MHI_CB_EE_RDDM:
if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags)))
queue_work(ab->workqueue_aux, &ab->reset_work);
break;
default:
break;
}
}
static int ath12k_mhi_op_read_reg(struct mhi_controller *mhi_cntrl,
void __iomem *addr,
u32 *out)
{
*out = readl(addr);
return 0;
}
static void ath12k_mhi_op_write_reg(struct mhi_controller *mhi_cntrl,
void __iomem *addr,
u32 val)
{
writel(val, addr);
}
int ath12k_mhi_register(struct ath12k_pci *ab_pci)
{
struct ath12k_base *ab = ab_pci->ab;
struct mhi_controller *mhi_ctrl;
int ret;
mhi_ctrl = mhi_alloc_controller();
if (!mhi_ctrl)
return -ENOMEM;
ath12k_core_create_firmware_path(ab, ATH12K_AMSS_FILE,
ab_pci->amss_path,
sizeof(ab_pci->amss_path));
ab_pci->mhi_ctrl = mhi_ctrl;
mhi_ctrl->cntrl_dev = ab->dev;
mhi_ctrl->fw_image = ab_pci->amss_path;
mhi_ctrl->regs = ab->mem;
mhi_ctrl->reg_len = ab->mem_len;
ret = ath12k_mhi_get_msi(ab_pci);
if (ret) {
ath12k_err(ab, "failed to get msi for mhi\n");
mhi_free_controller(mhi_ctrl);
return ret;
}
mhi_ctrl->iova_start = 0;
mhi_ctrl->iova_stop = 0xffffffff;
mhi_ctrl->sbl_size = SZ_512K;
mhi_ctrl->seg_len = SZ_512K;
mhi_ctrl->fbc_download = true;
mhi_ctrl->runtime_get = ath12k_mhi_op_runtime_get;
mhi_ctrl->runtime_put = ath12k_mhi_op_runtime_put;
mhi_ctrl->status_cb = ath12k_mhi_op_status_cb;
mhi_ctrl->read_reg = ath12k_mhi_op_read_reg;
mhi_ctrl->write_reg = ath12k_mhi_op_write_reg;
ret = mhi_register_controller(mhi_ctrl, ab->hw_params->mhi_config);
if (ret) {
ath12k_err(ab, "failed to register to mhi bus, err = %d\n", ret);
mhi_free_controller(mhi_ctrl);
return ret;
}
return 0;
}
void ath12k_mhi_unregister(struct ath12k_pci *ab_pci)
{
struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
mhi_unregister_controller(mhi_ctrl);
kfree(mhi_ctrl->irq);
mhi_free_controller(mhi_ctrl);
ab_pci->mhi_ctrl = NULL;
}
static char *ath12k_mhi_state_to_str(enum ath12k_mhi_state mhi_state)
{
switch (mhi_state) {
case ATH12K_MHI_INIT:
return "INIT";
case ATH12K_MHI_DEINIT:
return "DEINIT";
case ATH12K_MHI_POWER_ON:
return "POWER_ON";
case ATH12K_MHI_POWER_OFF:
return "POWER_OFF";
case ATH12K_MHI_FORCE_POWER_OFF:
return "FORCE_POWER_OFF";
case ATH12K_MHI_SUSPEND:
return "SUSPEND";
case ATH12K_MHI_RESUME:
return "RESUME";
case ATH12K_MHI_TRIGGER_RDDM:
return "TRIGGER_RDDM";
case ATH12K_MHI_RDDM_DONE:
return "RDDM_DONE";
default:
return "UNKNOWN";
}
}
static void ath12k_mhi_set_state_bit(struct ath12k_pci *ab_pci,
enum ath12k_mhi_state mhi_state)
{
struct ath12k_base *ab = ab_pci->ab;
switch (mhi_state) {
case ATH12K_MHI_INIT:
set_bit(ATH12K_MHI_INIT, &ab_pci->mhi_state);
break;
case ATH12K_MHI_DEINIT:
clear_bit(ATH12K_MHI_INIT, &ab_pci->mhi_state);
break;
case ATH12K_MHI_POWER_ON:
set_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state);
break;
case ATH12K_MHI_POWER_OFF:
case ATH12K_MHI_FORCE_POWER_OFF:
clear_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state);
clear_bit(ATH12K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
clear_bit(ATH12K_MHI_RDDM_DONE, &ab_pci->mhi_state);
break;
case ATH12K_MHI_SUSPEND:
set_bit(ATH12K_MHI_SUSPEND, &ab_pci->mhi_state);
break;
case ATH12K_MHI_RESUME:
clear_bit(ATH12K_MHI_SUSPEND, &ab_pci->mhi_state);
break;
case ATH12K_MHI_TRIGGER_RDDM:
set_bit(ATH12K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
break;
case ATH12K_MHI_RDDM_DONE:
set_bit(ATH12K_MHI_RDDM_DONE, &ab_pci->mhi_state);
break;
default:
ath12k_err(ab, "unhandled mhi state (%d)\n", mhi_state);
}
}
static int ath12k_mhi_check_state_bit(struct ath12k_pci *ab_pci,
enum ath12k_mhi_state mhi_state)
{
struct ath12k_base *ab = ab_pci->ab;
switch (mhi_state) {
case ATH12K_MHI_INIT:
if (!test_bit(ATH12K_MHI_INIT, &ab_pci->mhi_state))
return 0;
break;
case ATH12K_MHI_DEINIT:
case ATH12K_MHI_POWER_ON:
if (test_bit(ATH12K_MHI_INIT, &ab_pci->mhi_state) &&
!test_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state))
return 0;
break;
case ATH12K_MHI_FORCE_POWER_OFF:
if (test_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state))
return 0;
break;
case ATH12K_MHI_POWER_OFF:
case ATH12K_MHI_SUSPEND:
if (test_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state) &&
!test_bit(ATH12K_MHI_SUSPEND, &ab_pci->mhi_state))
return 0;
break;
case ATH12K_MHI_RESUME:
if (test_bit(ATH12K_MHI_SUSPEND, &ab_pci->mhi_state))
return 0;
break;
case ATH12K_MHI_TRIGGER_RDDM:
if (test_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state) &&
!test_bit(ATH12K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state))
return 0;
break;
case ATH12K_MHI_RDDM_DONE:
return 0;
default:
ath12k_err(ab, "unhandled mhi state: %s(%d)\n",
ath12k_mhi_state_to_str(mhi_state), mhi_state);
}
ath12k_err(ab, "failed to set mhi state %s(%d) in current mhi state (0x%lx)\n",
ath12k_mhi_state_to_str(mhi_state), mhi_state,
ab_pci->mhi_state);
return -EINVAL;
}
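/* Informational sketch of the transitions validated above:
 *
 *   DEINIT -> INIT -> POWER_ON <-> SUSPEND/RESUME
 *                        |
 *                        +--> POWER_OFF / FORCE_POWER_OFF / TRIGGER_RDDM
 *
 * RDDM_DONE is always permitted.
 */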
static int ath12k_mhi_set_state(struct ath12k_pci *ab_pci,
enum ath12k_mhi_state mhi_state)
{
struct ath12k_base *ab = ab_pci->ab;
int ret;
ret = ath12k_mhi_check_state_bit(ab_pci, mhi_state);
if (ret)
goto out;
ath12k_dbg(ab, ATH12K_DBG_PCI, "setting mhi state: %s(%d)\n",
ath12k_mhi_state_to_str(mhi_state), mhi_state);
switch (mhi_state) {
case ATH12K_MHI_INIT:
ret = mhi_prepare_for_power_up(ab_pci->mhi_ctrl);
break;
case ATH12K_MHI_DEINIT:
mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
ret = 0;
break;
case ATH12K_MHI_POWER_ON:
ret = mhi_async_power_up(ab_pci->mhi_ctrl);
break;
case ATH12K_MHI_POWER_OFF:
mhi_power_down(ab_pci->mhi_ctrl, true);
ret = 0;
break;
case ATH12K_MHI_FORCE_POWER_OFF:
mhi_power_down(ab_pci->mhi_ctrl, false);
ret = 0;
break;
case ATH12K_MHI_SUSPEND:
ret = mhi_pm_suspend(ab_pci->mhi_ctrl);
break;
case ATH12K_MHI_RESUME:
ret = mhi_pm_resume(ab_pci->mhi_ctrl);
break;
case ATH12K_MHI_TRIGGER_RDDM:
ret = mhi_force_rddm_mode(ab_pci->mhi_ctrl);
break;
case ATH12K_MHI_RDDM_DONE:
break;
default:
ath12k_err(ab, "unhandled MHI state (%d)\n", mhi_state);
ret = -EINVAL;
}
if (ret)
goto out;
ath12k_mhi_set_state_bit(ab_pci, mhi_state);
return 0;
out:
ath12k_err(ab, "failed to set mhi state: %s(%d)\n",
ath12k_mhi_state_to_str(mhi_state), mhi_state);
return ret;
}
int ath12k_mhi_start(struct ath12k_pci *ab_pci)
{
int ret;
ab_pci->mhi_ctrl->timeout_ms = MHI_TIMEOUT_DEFAULT_MS;
ret = ath12k_mhi_set_state(ab_pci, ATH12K_MHI_INIT);
if (ret)
goto out;
ret = ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_ON);
if (ret)
goto out;
return 0;
out:
return ret;
}
void ath12k_mhi_stop(struct ath12k_pci *ab_pci)
{
ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_OFF);
ath12k_mhi_set_state(ab_pci, ATH12K_MHI_DEINIT);
}
void ath12k_mhi_suspend(struct ath12k_pci *ab_pci)
{
ath12k_mhi_set_state(ab_pci, ATH12K_MHI_SUSPEND);
}
void ath12k_mhi_resume(struct ath12k_pci *ab_pci)
{
ath12k_mhi_set_state(ab_pci, ATH12K_MHI_RESUME);
}
|
linux-master
|
drivers/net/wireless/ath/ath12k/mhi.c
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
#include "hif.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"
#include "dp_mon.h"
static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab,
struct sk_buff *skb)
{
dev_kfree_skb_any(skb);
}
void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
/* TODO: Any other peer specific DP cleanup */
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find(ab, vdev_id, addr);
if (!peer) {
ath12k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
addr, vdev_id);
spin_unlock_bh(&ab->base_lock);
return;
}
ath12k_dp_rx_peer_tid_cleanup(ar, peer);
crypto_free_shash(peer->tfm_mmic);
spin_unlock_bh(&ab->base_lock);
}
int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
u32 reo_dest;
int ret = 0, tid;
/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
reo_dest = ar->dp.mac_id + 1;
ret = ath12k_wmi_set_peer_param(ar, addr, vdev_id,
WMI_PEER_SET_DEFAULT_ROUTING,
DP_RX_HASH_ENABLE | (reo_dest << 1));
if (ret) {
ath12k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
ret, addr, vdev_id);
return ret;
}
for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
ret = ath12k_dp_rx_peer_tid_setup(ar, addr, vdev_id, tid, 1, 0,
HAL_PN_TYPE_NONE);
if (ret) {
ath12k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
tid, ret);
goto peer_clean;
}
}
ret = ath12k_dp_rx_peer_frag_setup(ar, addr, vdev_id);
if (ret) {
ath12k_warn(ab, "failed to setup rx defrag context\n");
goto peer_clean;
}
/* TODO: Setup other peer specific resource used in data path */
return 0;
peer_clean:
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find(ab, vdev_id, addr);
if (!peer) {
ath12k_warn(ab, "failed to find the peer to del rx tid\n");
spin_unlock_bh(&ab->base_lock);
return -ENOENT;
}
for (; tid >= 0; tid--)
ath12k_dp_rx_peer_tid_delete(ar, peer, tid);
spin_unlock_bh(&ab->base_lock);
return ret;
}
void ath12k_dp_srng_cleanup(struct ath12k_base *ab, struct dp_srng *ring)
{
if (!ring->vaddr_unaligned)
return;
dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
ring->paddr_unaligned);
ring->vaddr_unaligned = NULL;
}
static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
{
int ext_group_num;
u8 mask = 1 << ring_num;
for (ext_group_num = 0; ext_group_num < ATH12K_EXT_IRQ_GRP_NUM_MAX;
ext_group_num++) {
if (mask & grp_mask[ext_group_num])
return ext_group_num;
}
return -ENOENT;
}
static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
enum hal_ring_type type, int ring_num)
{
const u8 *grp_mask;
switch (type) {
case HAL_WBM2SW_RELEASE:
if (ring_num == HAL_WBM2SW_REL_ERR_RING_NUM) {
grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0];
ring_num = 0;
} else {
grp_mask = &ab->hw_params->ring_mask->tx[0];
}
break;
case HAL_REO_EXCEPTION:
grp_mask = &ab->hw_params->ring_mask->rx_err[0];
break;
case HAL_REO_DST:
grp_mask = &ab->hw_params->ring_mask->rx[0];
break;
case HAL_REO_STATUS:
grp_mask = &ab->hw_params->ring_mask->reo_status[0];
break;
case HAL_RXDMA_MONITOR_STATUS:
case HAL_RXDMA_MONITOR_DST:
grp_mask = &ab->hw_params->ring_mask->rx_mon_dest[0];
break;
case HAL_TX_MONITOR_DST:
grp_mask = &ab->hw_params->ring_mask->tx_mon_dest[0];
break;
case HAL_RXDMA_BUF:
grp_mask = &ab->hw_params->ring_mask->host2rxdma[0];
break;
case HAL_RXDMA_MONITOR_BUF:
case HAL_TCL_DATA:
case HAL_TCL_CMD:
case HAL_REO_CMD:
case HAL_SW2WBM_RELEASE:
case HAL_WBM_IDLE_LINK:
case HAL_TCL_STATUS:
case HAL_REO_REINJECT:
case HAL_CE_SRC:
case HAL_CE_DST:
case HAL_CE_DST_STATUS:
default:
return -ENOENT;
}
return ath12k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}
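/* For illustration: with grp_mask = { 0x1, 0x2, 0x4, ... } and ring_num 1,
 * the mask BIT(1) == 0x2 matches grp_mask[1], so the ring is serviced by
 * ext IRQ group 1. A ring number present in no group mask yields -ENOENT,
 * and the caller leaves the ring without an MSI assignment.
 */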
static void ath12k_dp_srng_msi_setup(struct ath12k_base *ab,
struct hal_srng_params *ring_params,
enum hal_ring_type type, int ring_num)
{
int msi_group_number, msi_data_count;
u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
int ret;
ret = ath12k_hif_get_user_msi_vector(ab, "DP",
&msi_data_count, &msi_data_start,
&msi_irq_start);
if (ret)
return;
msi_group_number = ath12k_dp_srng_calculate_msi_group(ab, type,
ring_num);
if (msi_group_number < 0) {
ath12k_dbg(ab, ATH12K_DBG_PCI,
"ring not part of an ext_group; ring_type: %d,ring_num %d",
type, ring_num);
ring_params->msi_addr = 0;
ring_params->msi_data = 0;
return;
}
if (msi_group_number > msi_data_count) {
ath12k_dbg(ab, ATH12K_DBG_PCI,
"multiple msi_groups share one msi, msi_group_num %d",
msi_group_number);
}
ath12k_hif_get_msi_address(ab, &addr_lo, &addr_hi);
ring_params->msi_addr = addr_lo;
ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
ring_params->msi_data = (msi_group_number % msi_data_count)
+ msi_data_start;
ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}
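/* Worked example (numbers illustrative): with msi_data_start = 1,
 * msi_data_count = 3 and msi_group_number = 4, the programmed MSI data is
 * (4 % 3) + 1 = 2, i.e. groups beyond the vector count wrap around and
 * share vectors. The 64-bit MSI address is assembled from the low/high
 * 32-bit halves reported by the HIF layer.
 */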
int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
enum hal_ring_type type, int ring_num,
int mac_id, int num_entries)
{
struct hal_srng_params params = { 0 };
int entry_sz = ath12k_hal_srng_get_entrysize(ab, type);
int max_entries = ath12k_hal_srng_get_max_entries(ab, type);
int ret;
if (max_entries < 0 || entry_sz < 0)
return -EINVAL;
if (num_entries > max_entries)
num_entries = max_entries;
ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
&ring->paddr_unaligned,
GFP_KERNEL);
if (!ring->vaddr_unaligned)
return -ENOMEM;
ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
(unsigned long)ring->vaddr_unaligned);
params.ring_base_vaddr = ring->vaddr;
params.ring_base_paddr = ring->paddr;
params.num_entries = num_entries;
ath12k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);
switch (type) {
case HAL_REO_DST:
params.intr_batch_cntr_thres_entries =
HAL_SRNG_INT_BATCH_THRESHOLD_RX;
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
break;
case HAL_RXDMA_BUF:
case HAL_RXDMA_MONITOR_BUF:
case HAL_RXDMA_MONITOR_STATUS:
params.low_threshold = num_entries >> 3;
params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
params.intr_batch_cntr_thres_entries = 0;
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
break;
case HAL_TX_MONITOR_DST:
params.low_threshold = DP_TX_MONITOR_BUF_SIZE_MAX >> 3;
params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
params.intr_batch_cntr_thres_entries = 0;
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
break;
case HAL_WBM2SW_RELEASE:
if (ab->hw_params->hw_ops->dp_srng_is_tx_comp_ring(ring_num)) {
params.intr_batch_cntr_thres_entries =
HAL_SRNG_INT_BATCH_THRESHOLD_TX;
params.intr_timer_thres_us =
HAL_SRNG_INT_TIMER_THRESHOLD_TX;
break;
}
/* fall through when ring_num != HAL_WBM2SW_REL_ERR_RING_NUM */
fallthrough;
case HAL_REO_EXCEPTION:
case HAL_REO_REINJECT:
case HAL_REO_CMD:
case HAL_REO_STATUS:
case HAL_TCL_DATA:
case HAL_TCL_CMD:
case HAL_TCL_STATUS:
case HAL_WBM_IDLE_LINK:
case HAL_SW2WBM_RELEASE:
case HAL_RXDMA_DST:
case HAL_RXDMA_MONITOR_DST:
case HAL_RXDMA_MONITOR_DESC:
params.intr_batch_cntr_thres_entries =
HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
break;
case HAL_RXDMA_DIR_BUF:
break;
default:
ath12k_warn(ab, "Not a valid ring type in dp :%d\n", type);
return -EINVAL;
}
ret = ath12k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
if (ret < 0) {
ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n",
ret, ring_num);
return ret;
}
ring->ring_id = ret;
return 0;
}
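/* Alignment sketch (numbers illustrative): if dma_alloc_coherent() returns
 * an unaligned vaddr such as ...100c and HAL_RING_BASE_ALIGN is 8,
 * PTR_ALIGN() rounds it up to ...1010, and the same byte offset (4 here)
 * is added to paddr so the CPU and device views stay in sync. The extra
 * HAL_RING_BASE_ALIGN - 1 bytes allocated above guarantee room for this
 * adjustment.
 */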
static
u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_vif *arvif)
{
u32 bank_config = 0;
/* Only valid for raw frames with HW crypto enabled.
* With SW crypto, mac80211 sets key per packet
*/
if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
bank_config |=
u32_encode_bits(ath12k_dp_tx_get_encrypt_type(arvif->key_cipher),
HAL_TX_BANK_CONFIG_ENCRYPT_TYPE);
bank_config |= u32_encode_bits(arvif->tx_encap_type,
HAL_TX_BANK_CONFIG_ENCAP_TYPE);
bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_SRC_BUFFER_SWAP) |
u32_encode_bits(0, HAL_TX_BANK_CONFIG_LINK_META_SWAP) |
u32_encode_bits(0, HAL_TX_BANK_CONFIG_EPD);
/* only valid if idx_lookup_override is not set in tcl_data_cmd */
bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
bank_config |= u32_encode_bits(arvif->hal_addr_search_flags & HAL_TX_ADDRX_EN,
HAL_TX_BANK_CONFIG_ADDRX_EN) |
u32_encode_bits(!!(arvif->hal_addr_search_flags &
HAL_TX_ADDRY_EN),
HAL_TX_BANK_CONFIG_ADDRY_EN);
bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(arvif->vif) ? 3 : 0,
HAL_TX_BANK_CONFIG_MESH_EN) |
u32_encode_bits(arvif->vdev_id_check_en,
HAL_TX_BANK_CONFIG_VDEV_ID_CHECK_EN);
bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_DSCP_TIP_MAP_ID);
return bank_config;
}
static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab, struct ath12k_vif *arvif,
struct ath12k_dp *dp)
{
int bank_id = DP_INVALID_BANK_ID;
int i;
u32 bank_config;
bool configure_register = false;
/* convert vdev params into hal_tx_bank_config */
bank_config = ath12k_dp_tx_get_vdev_bank_config(ab, arvif);
spin_lock_bh(&dp->tx_bank_lock);
/* TODO: implement using idr kernel framework*/
for (i = 0; i < dp->num_bank_profiles; i++) {
if (dp->bank_profiles[i].is_configured &&
(dp->bank_profiles[i].bank_config ^ bank_config) == 0) {
bank_id = i;
goto inc_ref_and_return;
}
if (!dp->bank_profiles[i].is_configured ||
!dp->bank_profiles[i].num_users) {
bank_id = i;
goto configure_and_return;
}
}
if (bank_id == DP_INVALID_BANK_ID) {
spin_unlock_bh(&dp->tx_bank_lock);
ath12k_err(ab, "unable to find TX bank!");
return bank_id;
}
configure_and_return:
dp->bank_profiles[bank_id].is_configured = true;
dp->bank_profiles[bank_id].bank_config = bank_config;
configure_register = true;
inc_ref_and_return:
dp->bank_profiles[bank_id].num_users++;
spin_unlock_bh(&dp->tx_bank_lock);
if (configure_register)
ath12k_hal_tx_configure_bank_register(ab, bank_config, bank_id);
ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt tcl bank_id %d input 0x%x match 0x%x num_users %u",
bank_id, bank_config, dp->bank_profiles[bank_id].bank_config,
dp->bank_profiles[bank_id].num_users);
return bank_id;
}
void ath12k_dp_tx_put_bank_profile(struct ath12k_dp *dp, u8 bank_id)
{
spin_lock_bh(&dp->tx_bank_lock);
dp->bank_profiles[bank_id].num_users--;
spin_unlock_bh(&dp->tx_bank_lock);
}
static void ath12k_dp_deinit_bank_profiles(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
kfree(dp->bank_profiles);
dp->bank_profiles = NULL;
}
static int ath12k_dp_init_bank_profiles(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
u32 num_tcl_banks = ab->hw_params->num_tcl_banks;
int i;
dp->num_bank_profiles = num_tcl_banks;
dp->bank_profiles = kmalloc_array(num_tcl_banks,
sizeof(struct ath12k_dp_tx_bank_profile),
GFP_KERNEL);
if (!dp->bank_profiles)
return -ENOMEM;
spin_lock_init(&dp->tx_bank_lock);
for (i = 0; i < num_tcl_banks; i++) {
dp->bank_profiles[i].is_configured = false;
dp->bank_profiles[i].num_users = 0;
}
return 0;
}
static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
int i;
ath12k_dp_srng_cleanup(ab, &dp->reo_status_ring);
ath12k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
ath12k_dp_srng_cleanup(ab, &dp->reo_except_ring);
ath12k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
ath12k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
}
ath12k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
ath12k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
ath12k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
}
static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
struct hal_srng *srng;
int i, ret, tx_comp_ring_num;
u32 ring_hash_map;
ret = ath12k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
HAL_SW2WBM_RELEASE, 0, 0,
DP_WBM_RELEASE_RING_SIZE);
if (ret) {
ath12k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
ret);
goto err;
}
ret = ath12k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
DP_TCL_CMD_RING_SIZE);
if (ret) {
ath12k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
goto err;
}
ret = ath12k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
0, 0, DP_TCL_STATUS_RING_SIZE);
if (ret) {
ath12k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
goto err;
}
for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
tx_comp_ring_num = map[i].wbm_ring_num;
ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
HAL_TCL_DATA, i, 0,
DP_TCL_DATA_RING_SIZE);
if (ret) {
ath12k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
i, ret);
goto err;
}
ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
HAL_WBM2SW_RELEASE, tx_comp_ring_num, 0,
DP_TX_COMP_RING_SIZE);
if (ret) {
ath12k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
tx_comp_ring_num, ret);
goto err;
}
}
ret = ath12k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
0, 0, DP_REO_REINJECT_RING_SIZE);
if (ret) {
ath12k_warn(ab, "failed to set up reo_reinject ring :%d\n",
ret);
goto err;
}
ret = ath12k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
HAL_WBM2SW_REL_ERR_RING_NUM, 0,
DP_RX_RELEASE_RING_SIZE);
if (ret) {
ath12k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
goto err;
}
ret = ath12k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
0, 0, DP_REO_EXCEPTION_RING_SIZE);
if (ret) {
ath12k_warn(ab, "failed to set up reo_exception ring :%d\n",
ret);
goto err;
}
ret = ath12k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
0, 0, DP_REO_CMD_RING_SIZE);
if (ret) {
ath12k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
goto err;
}
srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
ath12k_hal_reo_init_cmd_ring(ab, srng);
ret = ath12k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
0, 0, DP_REO_STATUS_RING_SIZE);
if (ret) {
ath12k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
goto err;
}
/* When hash based routing of rx packet is enabled, 32 entries to map
* the hash values to the ring will be configured. Each hash entry uses
* four bits to map to a particular ring. The ring mapping will be
* 0:TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW and 7:SW5
* 8:SW6, 9:SW7, 10:SW8, 11:Not used.
*/
ring_hash_map = HAL_HASH_ROUTING_RING_SW1 |
HAL_HASH_ROUTING_RING_SW2 << 4 |
HAL_HASH_ROUTING_RING_SW3 << 8 |
HAL_HASH_ROUTING_RING_SW4 << 12 |
HAL_HASH_ROUTING_RING_SW1 << 16 |
HAL_HASH_ROUTING_RING_SW2 << 20 |
HAL_HASH_ROUTING_RING_SW3 << 24 |
HAL_HASH_ROUTING_RING_SW4 << 28;
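/* Illustrative decode of the value built above: nibble n (bits 4n..4n+3)
 * holds the destination ring for hash value n, so hash 0 -> SW1,
 * hash 1 -> SW2, ..., hash 4 -> SW1 again; the four SW rings repeat
 * across all eight configured entries.
 */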
ath12k_hal_reo_hw_setup(ab, ring_hash_map);
return 0;
err:
ath12k_dp_srng_common_cleanup(ab);
return ret;
}
static void ath12k_dp_scatter_idle_link_desc_cleanup(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
int i;
for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
if (!slist[i].vaddr)
continue;
dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
slist[i].vaddr, slist[i].paddr);
slist[i].vaddr = NULL;
}
}
static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
int size,
u32 n_link_desc_bank,
u32 n_link_desc,
u32 last_bank_sz)
{
struct ath12k_dp *dp = &ab->dp;
struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
u32 n_entries_per_buf;
int num_scatter_buf, scatter_idx;
struct hal_wbm_link_desc *scatter_buf;
int align_bytes, n_entries;
dma_addr_t paddr;
int rem_entries;
int i;
int ret = 0;
u32 end_offset, cookie;
n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
ath12k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
return -EINVAL;
for (i = 0; i < num_scatter_buf; i++) {
slist[i].vaddr = dma_alloc_coherent(ab->dev,
HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
&slist[i].paddr, GFP_KERNEL);
if (!slist[i].vaddr) {
ret = -ENOMEM;
goto err;
}
}
scatter_idx = 0;
scatter_buf = slist[scatter_idx].vaddr;
rem_entries = n_entries_per_buf;
for (i = 0; i < n_link_desc_bank; i++) {
align_bytes = link_desc_banks[i].vaddr -
link_desc_banks[i].vaddr_unaligned;
n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
HAL_LINK_DESC_SIZE;
paddr = link_desc_banks[i].paddr;
while (n_entries) {
cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
ath12k_hal_set_link_desc_addr(scatter_buf, cookie, paddr);
n_entries--;
paddr += HAL_LINK_DESC_SIZE;
if (rem_entries) {
rem_entries--;
scatter_buf++;
continue;
}
rem_entries = n_entries_per_buf;
scatter_idx++;
scatter_buf = slist[scatter_idx].vaddr;
}
}
end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
sizeof(struct hal_wbm_link_desc);
ath12k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
n_link_desc, end_offset);
return 0;
err:
ath12k_dp_scatter_idle_link_desc_cleanup(ab);
return ret;
}
static void
ath12k_dp_link_desc_bank_free(struct ath12k_base *ab,
struct dp_link_desc_bank *link_desc_banks)
{
int i;
for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
if (link_desc_banks[i].vaddr_unaligned) {
dma_free_coherent(ab->dev,
link_desc_banks[i].size,
link_desc_banks[i].vaddr_unaligned,
link_desc_banks[i].paddr_unaligned);
link_desc_banks[i].vaddr_unaligned = NULL;
}
}
}
static int ath12k_dp_link_desc_bank_alloc(struct ath12k_base *ab,
struct dp_link_desc_bank *desc_bank,
int n_link_desc_bank,
int last_bank_sz)
{
struct ath12k_dp *dp = &ab->dp;
int i;
int ret = 0;
int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
for (i = 0; i < n_link_desc_bank; i++) {
if (i == (n_link_desc_bank - 1) && last_bank_sz)
desc_sz = last_bank_sz;
desc_bank[i].vaddr_unaligned =
dma_alloc_coherent(ab->dev, desc_sz,
&desc_bank[i].paddr_unaligned,
GFP_KERNEL);
if (!desc_bank[i].vaddr_unaligned) {
ret = -ENOMEM;
goto err;
}
desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
HAL_LINK_DESC_ALIGN);
desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
((unsigned long)desc_bank[i].vaddr -
(unsigned long)desc_bank[i].vaddr_unaligned);
desc_bank[i].size = desc_sz;
}
return 0;
err:
ath12k_dp_link_desc_bank_free(ab, dp->link_desc_banks);
return ret;
}
void ath12k_dp_link_desc_cleanup(struct ath12k_base *ab,
struct dp_link_desc_bank *desc_bank,
u32 ring_type, struct dp_srng *ring)
{
ath12k_dp_link_desc_bank_free(ab, desc_bank);
if (ring_type != HAL_RXDMA_MONITOR_DESC) {
ath12k_dp_srng_cleanup(ab, ring);
ath12k_dp_scatter_idle_link_desc_cleanup(ab);
}
}
static int ath12k_wbm_idle_ring_setup(struct ath12k_base *ab, u32 *n_link_desc)
{
struct ath12k_dp *dp = &ab->dp;
u32 n_mpdu_link_desc, n_mpdu_queue_desc;
u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
int ret = 0;
n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
HAL_NUM_MPDUS_PER_LINK_DESC;
n_mpdu_queue_desc = n_mpdu_link_desc /
HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
DP_AVG_MSDUS_PER_FLOW) /
HAL_NUM_TX_MSDUS_PER_LINK_DESC;
n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
DP_AVG_MSDUS_PER_MPDU) /
HAL_NUM_RX_MSDUS_PER_LINK_DESC;
*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
n_tx_msdu_link_desc + n_rx_msdu_link_desc;
if (*n_link_desc & (*n_link_desc - 1))
*n_link_desc = 1 << fls(*n_link_desc);
ret = ath12k_dp_srng_setup(ab, &dp->wbm_idle_ring,
HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
if (ret) {
ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
return ret;
}
return ret;
}
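/* Rounding sketch for the power-of-two fixup above: a computed total such
 * as 0x2fd0 has more than one bit set (0x2fd0 & 0x2fcf != 0), so it is
 * rounded up to 1 << fls(0x2fd0) = 0x4000. A value that is already a
 * power of two is left untouched. (Example value illustrative.)
 */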
int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
struct dp_link_desc_bank *link_desc_banks,
u32 ring_type, struct hal_srng *srng,
u32 n_link_desc)
{
u32 tot_mem_sz;
u32 n_link_desc_bank, last_bank_sz;
u32 entry_sz, align_bytes, n_entries;
struct hal_wbm_link_desc *desc;
u32 paddr;
int i, ret;
u32 cookie;
tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
tot_mem_sz += HAL_LINK_DESC_ALIGN;
if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
n_link_desc_bank = 1;
last_bank_sz = tot_mem_sz;
} else {
n_link_desc_bank = tot_mem_sz /
(DP_LINK_DESC_ALLOC_SIZE_THRESH -
HAL_LINK_DESC_ALIGN);
last_bank_sz = tot_mem_sz %
(DP_LINK_DESC_ALLOC_SIZE_THRESH -
HAL_LINK_DESC_ALIGN);
if (last_bank_sz)
n_link_desc_bank += 1;
}
if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
return -EINVAL;
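/* Sizing sketch (illustrative): when n_link_desc * HAL_LINK_DESC_SIZE is
 * well above DP_LINK_DESC_ALLOC_SIZE_THRESH, the total is split into
 * full-threshold banks plus one trailing bank of last_bank_sz bytes when
 * the division leaves a remainder; each bank reserves HAL_LINK_DESC_ALIGN
 * bytes of alignment slack.
 */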
ret = ath12k_dp_link_desc_bank_alloc(ab, link_desc_banks,
n_link_desc_bank, last_bank_sz);
if (ret)
return ret;
/* Setup link desc idle list for HW internal usage */
entry_sz = ath12k_hal_srng_get_entrysize(ab, ring_type);
tot_mem_sz = entry_sz * n_link_desc;
/* Setup scatter desc list when the total memory requirement is more */
if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
ring_type != HAL_RXDMA_MONITOR_DESC) {
ret = ath12k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
n_link_desc_bank,
n_link_desc,
last_bank_sz);
if (ret) {
ath12k_warn(ab, "failed to setup scatting idle list descriptor :%d\n",
ret);
goto fail_desc_bank_free;
}
return 0;
}
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
for (i = 0; i < n_link_desc_bank; i++) {
align_bytes = link_desc_banks[i].vaddr -
link_desc_banks[i].vaddr_unaligned;
n_entries = (link_desc_banks[i].size - align_bytes) /
HAL_LINK_DESC_SIZE;
paddr = link_desc_banks[i].paddr;
while (n_entries &&
(desc = ath12k_hal_srng_src_get_next_entry(ab, srng))) {
cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
ath12k_hal_set_link_desc_addr(desc,
cookie, paddr);
n_entries--;
paddr += HAL_LINK_DESC_SIZE;
}
}
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return 0;
fail_desc_bank_free:
ath12k_dp_link_desc_bank_free(ab, link_desc_banks);
return ret;
}
int ath12k_dp_service_srng(struct ath12k_base *ab,
struct ath12k_ext_irq_grp *irq_grp,
int budget)
{
struct napi_struct *napi = &irq_grp->napi;
int grp_id = irq_grp->grp_id;
int work_done = 0;
int i = 0, j;
int tot_work_done = 0;
enum dp_monitor_mode monitor_mode;
u8 ring_mask;
while (i < ab->hw_params->max_tx_ring) {
if (ab->hw_params->ring_mask->tx[grp_id] &
BIT(ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[i].wbm_ring_num))
ath12k_dp_tx_completion_handler(ab, i);
i++;
}
if (ab->hw_params->ring_mask->rx_err[grp_id]) {
work_done = ath12k_dp_rx_process_err(ab, napi, budget);
budget -= work_done;
tot_work_done += work_done;
if (budget <= 0)
goto done;
}
if (ab->hw_params->ring_mask->rx_wbm_rel[grp_id]) {
work_done = ath12k_dp_rx_process_wbm_err(ab,
napi,
budget);
budget -= work_done;
tot_work_done += work_done;
if (budget <= 0)
goto done;
}
if (ab->hw_params->ring_mask->rx[grp_id]) {
i = fls(ab->hw_params->ring_mask->rx[grp_id]) - 1;
work_done = ath12k_dp_rx_process(ab, i, napi,
budget);
budget -= work_done;
tot_work_done += work_done;
if (budget <= 0)
goto done;
}
if (ab->hw_params->ring_mask->rx_mon_dest[grp_id]) {
monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
for (i = 0; i < ab->num_radios; i++) {
for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
int id = i * ab->hw_params->num_rxmda_per_pdev + j;
if (ring_mask & BIT(id)) {
work_done =
ath12k_dp_mon_process_ring(ab, id, napi, budget,
monitor_mode);
budget -= work_done;
tot_work_done += work_done;
if (budget <= 0)
goto done;
}
}
}
}
if (ab->hw_params->ring_mask->tx_mon_dest[grp_id]) {
monitor_mode = ATH12K_DP_TX_MONITOR_MODE;
ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id];
for (i = 0; i < ab->num_radios; i++) {
for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
int id = i * ab->hw_params->num_rxmda_per_pdev + j;
if (ring_mask & BIT(id)) {
work_done =
ath12k_dp_mon_process_ring(ab, id, napi, budget,
monitor_mode);
budget -= work_done;
tot_work_done += work_done;
if (budget <= 0)
goto done;
}
}
}
}
if (ab->hw_params->ring_mask->reo_status[grp_id])
ath12k_dp_rx_process_reo_status(ab);
if (ab->hw_params->ring_mask->host2rxdma[grp_id]) {
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, 0,
ab->hw_params->hal_params->rx_buf_rbm,
true);
}
/* TODO: Implement handler for other interrupts */
done:
return tot_work_done;
}
void ath12k_dp_pdev_free(struct ath12k_base *ab)
{
int i;
del_timer_sync(&ab->mon_reap_timer);
for (i = 0; i < ab->num_radios; i++)
ath12k_dp_rx_pdev_free(ab, i);
}
void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
{
struct ath12k *ar;
struct ath12k_pdev_dp *dp;
int i;
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
dp = &ar->dp;
dp->mac_id = i;
atomic_set(&dp->num_tx_pending, 0);
init_waitqueue_head(&dp->tx_empty_waitq);
/* TODO: Add any RXDMA setup required per pdev */
}
}
static void ath12k_dp_service_mon_ring(struct timer_list *t)
{
struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
int i;
for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET,
ATH12K_DP_RX_MONITOR_MODE);
mod_timer(&ab->mon_reap_timer, jiffies +
msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
}
static void ath12k_dp_mon_reap_timer_init(struct ath12k_base *ab)
{
if (ab->hw_params->rxdma1_enable)
return;
timer_setup(&ab->mon_reap_timer, ath12k_dp_service_mon_ring, 0);
}
int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
{
struct ath12k *ar;
int ret;
int i;
ret = ath12k_dp_rx_htt_setup(ab);
if (ret)
goto out;
ath12k_dp_mon_reap_timer_init(ab);
/* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
ret = ath12k_dp_rx_pdev_alloc(ab, i);
if (ret) {
ath12k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
i);
goto err;
}
ret = ath12k_dp_rx_pdev_mon_attach(ar);
if (ret) {
ath12k_warn(ab, "failed to initialize mon pdev %d\n", i);
goto err;
}
}
return 0;
err:
ath12k_dp_pdev_free(ab);
out:
return ret;
}
int ath12k_dp_htt_connect(struct ath12k_dp *dp)
{
struct ath12k_htc_svc_conn_req conn_req = {0};
struct ath12k_htc_svc_conn_resp conn_resp = {0};
int status;
conn_req.ep_ops.ep_tx_complete = ath12k_dp_htt_htc_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath12k_dp_htt_htc_t2h_msg_handler;
/* connect to control service */
conn_req.service_id = ATH12K_HTC_SVC_ID_HTT_DATA_MSG;
status = ath12k_htc_connect_service(&dp->ab->htc, &conn_req,
&conn_resp);
if (status)
return status;
dp->eid = conn_resp.eid;
return 0;
}
static void ath12k_dp_update_vdev_search(struct ath12k_vif *arvif)
{
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_STA:
/* TODO: Verify the search type and flags since ast hash
* is not part of peer mapv3
*/
arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
break;
case WMI_VDEV_TYPE_AP:
case WMI_VDEV_TYPE_IBSS:
arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
break;
case WMI_VDEV_TYPE_MONITOR:
default:
return;
}
}
void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif)
{
struct ath12k_base *ab = ar->ab;
arvif->tcl_metadata |= u32_encode_bits(1, HTT_TCL_META_DATA_TYPE) |
u32_encode_bits(arvif->vdev_id,
HTT_TCL_META_DATA_VDEV_ID) |
u32_encode_bits(ar->pdev->pdev_id,
HTT_TCL_META_DATA_PDEV_ID);
/* set HTT extension valid bit to 0 by default */
arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
ath12k_dp_update_vdev_search(arvif);
arvif->vdev_id_check_en = true;
arvif->bank_id = ath12k_dp_tx_get_bank_profile(ab, arvif, &ab->dp);
/* TODO: error path for bank id failure */
if (arvif->bank_id == DP_INVALID_BANK_ID) {
ath12k_err(ar->ab, "Failed to initialize DP TX Banks");
return;
}
}
static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
{
struct ath12k_rx_desc_info *desc_info, *tmp;
struct ath12k_tx_desc_info *tx_desc_info, *tmp1;
struct ath12k_dp *dp = &ab->dp;
struct sk_buff *skb;
int i;
u32 pool_id, tx_spt_page;
if (!dp->spt_info)
return;
/* RX Descriptor cleanup */
spin_lock_bh(&dp->rx_desc_lock);
list_for_each_entry_safe(desc_info, tmp, &dp->rx_desc_used_list, list) {
list_del(&desc_info->list);
skb = desc_info->skb;
if (!skb)
continue;
dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
if (!dp->spt_info->rxbaddr[i])
continue;
kfree(dp->spt_info->rxbaddr[i]);
dp->spt_info->rxbaddr[i] = NULL;
}
spin_unlock_bh(&dp->rx_desc_lock);
/* TX Descriptor cleanup */
for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
spin_lock_bh(&dp->tx_desc_lock[i]);
list_for_each_entry_safe(tx_desc_info, tmp1, &dp->tx_desc_used_list[i],
list) {
list_del(&tx_desc_info->list);
skb = tx_desc_info->skb;
if (!skb)
continue;
dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
spin_unlock_bh(&dp->tx_desc_lock[i]);
}
for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
spin_lock_bh(&dp->tx_desc_lock[pool_id]);
for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
if (!dp->spt_info->txbaddr[tx_spt_page])
continue;
kfree(dp->spt_info->txbaddr[tx_spt_page]);
dp->spt_info->txbaddr[tx_spt_page] = NULL;
}
spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
}
/* unmap SPT pages */
for (i = 0; i < dp->num_spt_pages; i++) {
if (!dp->spt_info[i].vaddr)
continue;
dma_free_coherent(ab->dev, ATH12K_PAGE_SIZE,
dp->spt_info[i].vaddr, dp->spt_info[i].paddr);
dp->spt_info[i].vaddr = NULL;
}
kfree(dp->spt_info);
}
static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
if (!ab->hw_params->reoq_lut_support)
return;
if (!dp->reoq_lut.vaddr)
return;
dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
dp->reoq_lut.vaddr = NULL;
ath12k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), 0);
}
void ath12k_dp_free(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
int i;
ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
ath12k_dp_cc_cleanup(ab);
ath12k_dp_reoq_lut_cleanup(ab);
ath12k_dp_deinit_bank_profiles(ab);
ath12k_dp_srng_common_cleanup(ab);
ath12k_dp_rx_reo_cmd_list_cleanup(ab);
for (i = 0; i < ab->hw_params->max_tx_ring; i++)
kfree(dp->tx_ring[i].tx_status);
ath12k_dp_rx_free(ab);
/* Deinit any SOC level resource */
}
void ath12k_dp_cc_config(struct ath12k_base *ab)
{
u32 cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
u32 wbm_base = HAL_SEQ_WCSS_UMAC_WBM_REG;
u32 val = 0;
ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG0(ab), cmem_base);
val |= u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
HAL_REO1_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
u32_encode_bits(ATH12K_CC_PPT_MSB,
HAL_REO1_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
u32_encode_bits(ATH12K_CC_SPT_MSB,
HAL_REO1_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ALIGN) |
u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ENABLE) |
u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE);
ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG1(ab), val);
/* Enable HW CC for WBM */
ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG0, cmem_base);
val = u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
HAL_WBM_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
u32_encode_bits(ATH12K_CC_PPT_MSB,
HAL_WBM_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
u32_encode_bits(ATH12K_CC_SPT_MSB,
HAL_WBM_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ALIGN);
ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG1, val);
/* Enable conversion complete indication */
val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2);
val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_RELEASE_PATH_EN) |
u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ERR_PATH_EN) |
u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_CONV_IND_EN);
ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2, val);
/* Enable Cookie conversion for WBM2SW Rings */
val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG);
val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CONV_CFG_GLOBAL_EN) |
ab->hw_params->hal_params->wbm2sw_cc_enable;
ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG, val);
}
static u32 ath12k_dp_cc_cookie_gen(u16 ppt_idx, u16 spt_idx)
{
return (u32)ppt_idx << ATH12K_CC_PPT_SHIFT | spt_idx;
}
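/* Cookie layout sketch: the page index occupies the bits above
 * ATH12K_CC_PPT_SHIFT and the entry index the bits below it, so e.g.
 * ppt_idx = 2, spt_idx = 5 with a shift of 18 would yield (2 << 18) | 5.
 * (Shift value used for illustration only.)
 */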
static inline void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_base *ab,
u16 ppt_idx, u16 spt_idx)
{
struct ath12k_dp *dp = &ab->dp;
return dp->spt_info[ppt_idx].vaddr + spt_idx;
}
struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
u32 cookie)
{
struct ath12k_rx_desc_info **desc_addr_ptr;
u16 ppt_idx, spt_idx;
ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
spt_idx = u32_get_bits(cookie, ATH12k_DP_CC_COOKIE_SPT);
if (ppt_idx > ATH12K_NUM_RX_SPT_PAGES ||
spt_idx > ATH12K_MAX_SPT_ENTRIES)
return NULL;
desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
return *desc_addr_ptr;
}
struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
u32 cookie)
{
struct ath12k_tx_desc_info **desc_addr_ptr;
u16 ppt_idx, spt_idx;
ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
spt_idx = u32_get_bits(cookie, ATH12k_DP_CC_COOKIE_SPT);
if (ppt_idx < ATH12K_NUM_RX_SPT_PAGES ||
ppt_idx > ab->dp.num_spt_pages ||
spt_idx > ATH12K_MAX_SPT_ENTRIES)
return NULL;
desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
return *desc_addr_ptr;
}
static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
u32 i, j, pool_id, tx_spt_page;
u32 ppt_idx;
spin_lock_bh(&dp->rx_desc_lock);
/* First ATH12K_NUM_RX_SPT_PAGES of allocated SPT pages are used for RX */
for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
rx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*rx_descs),
GFP_ATOMIC);
if (!rx_descs) {
spin_unlock_bh(&dp->rx_desc_lock);
return -ENOMEM;
}
dp->spt_info->rxbaddr[i] = &rx_descs[0];
for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(i, j);
rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
/* Update descriptor VA in SPT */
rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, i, j);
*rx_desc_addr = &rx_descs[j];
}
}
spin_unlock_bh(&dp->rx_desc_lock);
for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
spin_lock_bh(&dp->tx_desc_lock[pool_id]);
for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
tx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*tx_descs),
GFP_ATOMIC);
if (!tx_descs) {
spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
/* Caller takes care of TX pending and RX desc cleanup */
return -ENOMEM;
}
tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
dp->spt_info->txbaddr[tx_spt_page] = &tx_descs[0];
for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
ppt_idx = ATH12K_NUM_RX_SPT_PAGES + tx_spt_page;
tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j);
tx_descs[j].pool_id = pool_id;
list_add_tail(&tx_descs[j].list,
&dp->tx_desc_free_list[pool_id]);
/* Update descriptor VA in SPT */
tx_desc_addr =
ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
*tx_desc_addr = &tx_descs[j];
}
}
spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
}
return 0;
}
static int ath12k_dp_cc_init(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
int i, ret = 0;
u32 cmem_base;
INIT_LIST_HEAD(&dp->rx_desc_free_list);
INIT_LIST_HEAD(&dp->rx_desc_used_list);
spin_lock_init(&dp->rx_desc_lock);
for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
INIT_LIST_HEAD(&dp->tx_desc_free_list[i]);
INIT_LIST_HEAD(&dp->tx_desc_used_list[i]);
spin_lock_init(&dp->tx_desc_lock[i]);
}
dp->num_spt_pages = ATH12K_NUM_SPT_PAGES;
if (dp->num_spt_pages > ATH12K_MAX_PPT_ENTRIES)
dp->num_spt_pages = ATH12K_MAX_PPT_ENTRIES;
dp->spt_info = kcalloc(dp->num_spt_pages, sizeof(struct ath12k_spt_info),
GFP_KERNEL);
if (!dp->spt_info) {
ath12k_warn(ab, "SPT page allocation failure");
return -ENOMEM;
}
cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
for (i = 0; i < dp->num_spt_pages; i++) {
dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
ATH12K_PAGE_SIZE,
&dp->spt_info[i].paddr,
GFP_KERNEL);
if (!dp->spt_info[i].vaddr) {
ret = -ENOMEM;
goto free;
}
if (dp->spt_info[i].paddr & ATH12K_SPT_4K_ALIGN_CHECK) {
ath12k_warn(ab, "SPT allocated memory is not 4K aligned");
ret = -EINVAL;
goto free;
}
/* Write to PPT in CMEM */
ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
}
ret = ath12k_dp_cc_desc_init(ab);
if (ret) {
ath12k_warn(ab, "HW CC desc init failed %d", ret);
goto free;
}
return 0;
free:
ath12k_dp_cc_cleanup(ab);
return ret;
}
static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
if (!ab->hw_params->reoq_lut_support)
return 0;
dp->reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
DP_REOQ_LUT_SIZE,
&dp->reoq_lut.paddr,
GFP_KERNEL | __GFP_ZERO);
if (!dp->reoq_lut.vaddr) {
ath12k_warn(ab, "failed to allocate memory for reoq table");
return -ENOMEM;
}
ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
dp->reoq_lut.paddr);
return 0;
}
int ath12k_dp_alloc(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
struct hal_srng *srng = NULL;
size_t size = 0;
u32 n_link_desc = 0;
int ret;
int i;
dp->ab = ab;
INIT_LIST_HEAD(&dp->reo_cmd_list);
INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
spin_lock_init(&dp->reo_cmd_lock);
dp->reo_cmd_cache_flush_count = 0;
ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
if (ret) {
ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
return ret;
}
srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
ret = ath12k_dp_link_desc_setup(ab, dp->link_desc_banks,
HAL_WBM_IDLE_LINK, srng, n_link_desc);
if (ret) {
ath12k_warn(ab, "failed to setup link desc: %d\n", ret);
return ret;
}
ret = ath12k_dp_cc_init(ab);
if (ret) {
ath12k_warn(ab, "failed to setup cookie converter %d\n", ret);
goto fail_link_desc_cleanup;
}
ret = ath12k_dp_init_bank_profiles(ab);
if (ret) {
ath12k_warn(ab, "failed to setup bank profiles %d\n", ret);
goto fail_hw_cc_cleanup;
}
ret = ath12k_dp_srng_common_setup(ab);
if (ret)
goto fail_dp_bank_profiles_cleanup;
size = sizeof(struct hal_wbm_release_ring_tx) * DP_TX_COMP_RING_SIZE;
ret = ath12k_dp_reoq_lut_setup(ab);
if (ret) {
ath12k_warn(ab, "failed to setup reoq table %d\n", ret);
goto fail_cmn_srng_cleanup;
}
for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
dp->tx_ring[i].tcl_data_ring_id = i;
dp->tx_ring[i].tx_status_head = 0;
dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
if (!dp->tx_ring[i].tx_status) {
ret = -ENOMEM;
/* FIXME: The allocated tx status is not freed
* properly here
*/
goto fail_cmn_reoq_cleanup;
}
}
for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
ath12k_hal_tx_set_dscp_tid_map(ab, i);
ret = ath12k_dp_rx_alloc(ab);
if (ret)
goto fail_dp_rx_free;
/* Init any SOC level resource for DP */
return 0;
fail_dp_rx_free:
ath12k_dp_rx_free(ab);
fail_cmn_reoq_cleanup:
ath12k_dp_reoq_lut_cleanup(ab);
fail_cmn_srng_cleanup:
ath12k_dp_srng_common_cleanup(ab);
fail_dp_bank_profiles_cleanup:
ath12k_dp_deinit_bank_profiles(ab);
fail_hw_cc_cleanup:
ath12k_dp_cc_cleanup(ab);
fail_link_desc_cleanup:
ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
return ret;
}
|
linux-master
|
drivers/net/wireless/ath/ath12k/dp.c
|
/*
* Atheros CARL9170 driver
*
* LED handling
*
* Copyright 2008, Johannes Berg <[email protected]>
* Copyright 2009, 2010, Christian Lamparter <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, see
* http://www.gnu.org/licenses/.
*
* This file incorporates work covered by the following copyright and
* permission notice:
* Copyright (c) 2007-2008 Atheros Communications, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "carl9170.h"
#include "cmd.h"
int carl9170_led_set_state(struct ar9170 *ar, const u32 led_state)
{
return carl9170_write_reg(ar, AR9170_GPIO_REG_PORT_DATA, led_state);
}
int carl9170_led_init(struct ar9170 *ar)
{
int err;
/* disable LEDs */
/* GPIO [0/1 mode: output, 2/3: input] */
err = carl9170_write_reg(ar, AR9170_GPIO_REG_PORT_TYPE, 3);
if (err)
goto out;
/* GPIO 0/1 value: off */
err = carl9170_led_set_state(ar, 0);
out:
return err;
}
#ifdef CONFIG_CARL9170_LEDS
static void carl9170_led_update(struct work_struct *work)
{
struct ar9170 *ar = container_of(work, struct ar9170, led_work.work);
int i, tmp = 300, blink_delay = 1000;
u32 led_val = 0;
bool rerun = false;
if (!IS_ACCEPTING_CMD(ar))
return;
mutex_lock(&ar->mutex);
for (i = 0; i < AR9170_NUM_LEDS; i++) {
if (ar->leds[i].registered) {
if (ar->leds[i].last_state ||
ar->leds[i].toggled) {
if (ar->leds[i].toggled)
tmp = 70 + 200 / (ar->leds[i].toggled);
if (tmp < blink_delay)
blink_delay = tmp;
led_val |= 1 << i;
ar->leds[i].toggled = 0;
rerun = true;
}
}
}
carl9170_led_set_state(ar, led_val);
mutex_unlock(&ar->mutex);
if (!rerun)
return;
ieee80211_queue_delayed_work(ar->hw,
&ar->led_work,
msecs_to_jiffies(blink_delay));
}
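/* Blink-rate sketch: a LED toggled once since the last pass blinks with a
 * delay of 70 + 200/1 = 270 ms, while rapid activity (say toggled = 10)
 * shortens it to 70 + 200/10 = 90 ms; the work item reschedules itself
 * with the shortest delay requested by any LED.
 */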
static void carl9170_led_set_brightness(struct led_classdev *led,
enum led_brightness brightness)
{
struct carl9170_led *arl = container_of(led, struct carl9170_led, l);
struct ar9170 *ar = arl->ar;
if (!arl->registered)
return;
if (arl->last_state != !!brightness) {
arl->toggled++;
arl->last_state = !!brightness;
}
if (likely(IS_ACCEPTING_CMD(ar) && arl->toggled))
ieee80211_queue_delayed_work(ar->hw, &ar->led_work, HZ / 10);
}
static int carl9170_led_register_led(struct ar9170 *ar, int i, char *name,
const char *trigger)
{
int err;
snprintf(ar->leds[i].name, sizeof(ar->leds[i].name),
"carl9170-%s::%s", wiphy_name(ar->hw->wiphy), name);
ar->leds[i].ar = ar;
ar->leds[i].l.name = ar->leds[i].name;
ar->leds[i].l.brightness_set = carl9170_led_set_brightness;
ar->leds[i].l.brightness = 0;
ar->leds[i].l.default_trigger = trigger;
err = led_classdev_register(wiphy_dev(ar->hw->wiphy),
&ar->leds[i].l);
if (err) {
wiphy_err(ar->hw->wiphy, "failed to register %s LED (%d).\n",
ar->leds[i].name, err);
} else {
ar->leds[i].registered = true;
}
return err;
}
void carl9170_led_unregister(struct ar9170 *ar)
{
int i;
for (i = 0; i < AR9170_NUM_LEDS; i++)
if (ar->leds[i].registered) {
led_classdev_unregister(&ar->leds[i].l);
ar->leds[i].registered = false;
ar->leds[i].toggled = 0;
}
cancel_delayed_work_sync(&ar->led_work);
}
int carl9170_led_register(struct ar9170 *ar)
{
int err;
INIT_DELAYED_WORK(&ar->led_work, carl9170_led_update);
err = carl9170_led_register_led(ar, 0, "tx",
ieee80211_get_tx_led_name(ar->hw));
if (err)
goto fail;
if (ar->features & CARL9170_ONE_LED)
return 0;
err = carl9170_led_register_led(ar, 1, "assoc",
ieee80211_get_assoc_led_name(ar->hw));
if (err)
goto fail;
return 0;
fail:
carl9170_led_unregister(ar);
return err;
}
#endif /* CONFIG_CARL9170_LEDS */
|
linux-master
|
drivers/net/wireless/ath/carl9170/led.c
|
/*
* Atheros CARL9170 driver
*
* 802.11 & command trap routines
*
* Copyright 2008, Johannes Berg <[email protected]>
* Copyright 2009, 2010, Christian Lamparter <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, see
* http://www.gnu.org/licenses/.
*
* This file incorporates work covered by the following copyright and
* permission notice:
* Copyright (c) 2007-2008 Atheros Communications, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <net/mac80211.h>
#include "carl9170.h"
#include "hw.h"
#include "cmd.h"
static void carl9170_dbg_message(struct ar9170 *ar, const char *buf, u32 len)
{
bool restart = false;
enum carl9170_restart_reasons reason = CARL9170_RR_NO_REASON;
if (len > 3) {
if (memcmp(buf, CARL9170_ERR_MAGIC, 3) == 0) {
ar->fw.err_counter++;
if (ar->fw.err_counter > 3) {
restart = true;
reason = CARL9170_RR_TOO_MANY_FIRMWARE_ERRORS;
}
}
if (memcmp(buf, CARL9170_BUG_MAGIC, 3) == 0) {
ar->fw.bug_counter++;
restart = true;
reason = CARL9170_RR_FATAL_FIRMWARE_ERROR;
}
}
wiphy_info(ar->hw->wiphy, "FW: %.*s\n", len, buf);
if (restart)
carl9170_restart(ar, reason);
}
static void carl9170_handle_ps(struct ar9170 *ar, struct carl9170_rsp *rsp)
{
u32 ps;
bool new_ps;
ps = le32_to_cpu(rsp->psm.state);
new_ps = (ps & CARL9170_PSM_COUNTER) != CARL9170_PSM_WAKE;
if (ar->ps.state != new_ps) {
if (!new_ps) {
ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
ar->ps.last_action);
}
ar->ps.last_action = jiffies;
ar->ps.state = new_ps;
}
}
static int carl9170_check_sequence(struct ar9170 *ar, unsigned int seq)
{
if (ar->cmd_seq < -1)
return 0;
/*
* Initialize Counter
*/
if (ar->cmd_seq < 0)
ar->cmd_seq = seq;
/*
* The sequence is strictly monotonically increasing and it never skips!
*
* Therefore we can safely assume that whenever we received an
* unexpected sequence we have lost some valuable data.
*/
if (seq != ar->cmd_seq) {
int count;
count = (seq - ar->cmd_seq) % ar->fw.cmd_bufs;
wiphy_err(ar->hw->wiphy, "lost %d command responses/traps! "
"w:%d g:%d\n", count, ar->cmd_seq, seq);
carl9170_restart(ar, CARL9170_RR_LOST_RSP);
return -EIO;
}
ar->cmd_seq = (ar->cmd_seq + 1) % ar->fw.cmd_bufs;
return 0;
}
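/* Example (values illustrative): with cmd_bufs = 16, a jump from expected
 * sequence 5 to received sequence 9 reports (9 - 5) % 16 = 4 lost
 * responses/traps and triggers a restart; otherwise the expected counter
 * advances by one, modulo the number of firmware command buffers.
 */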
static void carl9170_cmd_callback(struct ar9170 *ar, u32 len, void *buffer)
{
/*
* Some commands may have a variable response length
* and we cannot predict the correct length in advance.
* So we only check if we provided enough space for the data.
*/
if (unlikely(ar->readlen != (len - 4))) {
dev_warn(&ar->udev->dev, "received invalid command response:"
"got %d, instead of %d\n", len - 4, ar->readlen);
print_hex_dump_bytes("carl9170 cmd:", DUMP_PREFIX_OFFSET,
ar->cmd_buf, (ar->cmd.hdr.len + 4) & 0x3f);
print_hex_dump_bytes("carl9170 rsp:", DUMP_PREFIX_OFFSET,
buffer, len);
/*
* Do not complete. The command times out,
* and we get a stack trace from there.
*/
carl9170_restart(ar, CARL9170_RR_INVALID_RSP);
}
spin_lock(&ar->cmd_lock);
if (ar->readbuf) {
if (len >= 4)
memcpy(ar->readbuf, buffer + 4, len - 4);
ar->readbuf = NULL;
}
complete(&ar->cmd_wait);
spin_unlock(&ar->cmd_lock);
}
void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
{
struct carl9170_rsp *cmd = buf;
struct ieee80211_vif *vif;
if ((cmd->hdr.cmd & CARL9170_RSP_FLAG) != CARL9170_RSP_FLAG) {
if (!(cmd->hdr.cmd & CARL9170_CMD_ASYNC_FLAG))
carl9170_cmd_callback(ar, len, buf);
return;
}
if (unlikely(cmd->hdr.len != (len - 4))) {
if (net_ratelimit()) {
wiphy_err(ar->hw->wiphy, "FW: received over-/under"
"sized event %x (%d, but should be %d).\n",
cmd->hdr.cmd, cmd->hdr.len, len - 4);
print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE,
buf, len);
}
return;
}
/* hardware event handlers */
switch (cmd->hdr.cmd) {
case CARL9170_RSP_PRETBTT:
/* pre-TBTT event */
rcu_read_lock();
vif = carl9170_get_main_vif(ar);
if (!vif) {
rcu_read_unlock();
break;
}
switch (vif->type) {
case NL80211_IFTYPE_STATION:
carl9170_handle_ps(ar, cmd);
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
carl9170_update_beacon(ar, true);
break;
default:
break;
}
rcu_read_unlock();
break;
case CARL9170_RSP_TXCOMP:
/* TX status notification */
carl9170_tx_process_status(ar, cmd);
break;
case CARL9170_RSP_BEACON_CONFIG:
/*
* (IBSS) beacon send notification
* bytes: 04 c2 XX YY B4 B3 B2 B1
*
* XX always 80
* YY always 00
* B1-B4 "should" be the number of send out beacons.
*/
break;
case CARL9170_RSP_ATIM:
/* End of Atim Window */
break;
case CARL9170_RSP_WATCHDOG:
/* Watchdog Interrupt */
carl9170_restart(ar, CARL9170_RR_WATCHDOG);
break;
case CARL9170_RSP_TEXT:
/* firmware debug */
carl9170_dbg_message(ar, (char *)buf + 4, len - 4);
break;
case CARL9170_RSP_HEXDUMP:
wiphy_dbg(ar->hw->wiphy, "FW: HD %d\n", len - 4);
print_hex_dump_bytes("FW:", DUMP_PREFIX_NONE,
(char *)buf + 4, len - 4);
break;
case CARL9170_RSP_RADAR:
if (!net_ratelimit())
break;
wiphy_info(ar->hw->wiphy, "FW: RADAR! Please report this "
"incident to [email protected] !\n");
break;
case CARL9170_RSP_GPIO:
#ifdef CONFIG_CARL9170_WPC
if (ar->wps.pbc) {
bool state = !!(cmd->gpio.gpio & cpu_to_le32(
AR9170_GPIO_PORT_WPS_BUTTON_PRESSED));
if (state != ar->wps.pbc_state) {
ar->wps.pbc_state = state;
input_report_key(ar->wps.pbc, KEY_WPS_BUTTON,
state);
input_sync(ar->wps.pbc);
}
}
#endif /* CONFIG_CARL9170_WPC */
break;
case CARL9170_RSP_BOOT:
complete(&ar->fw_boot_wait);
break;
default:
wiphy_err(ar->hw->wiphy, "FW: received unhandled event %x\n",
cmd->hdr.cmd);
print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len);
break;
}
}
static int carl9170_rx_mac_status(struct ar9170 *ar,
struct ar9170_rx_head *head, struct ar9170_rx_macstatus *mac,
struct ieee80211_rx_status *status)
{
struct ieee80211_channel *chan;
u8 error, decrypt;
BUILD_BUG_ON(sizeof(struct ar9170_rx_head) != 12);
BUILD_BUG_ON(sizeof(struct ar9170_rx_macstatus) != 4);
error = mac->error;
if (error & AR9170_RX_ERROR_WRONG_RA) {
if (!ar->sniffer_enabled)
return -EINVAL;
}
if (error & AR9170_RX_ERROR_PLCP) {
if (!(ar->filter_state & FIF_PLCPFAIL))
return -EINVAL;
status->flag |= RX_FLAG_FAILED_PLCP_CRC;
}
if (error & AR9170_RX_ERROR_FCS) {
ar->tx_fcs_errors++;
if (!(ar->filter_state & FIF_FCSFAIL))
return -EINVAL;
status->flag |= RX_FLAG_FAILED_FCS_CRC;
}
decrypt = ar9170_get_decrypt_type(mac);
if (!(decrypt & AR9170_RX_ENC_SOFTWARE) &&
decrypt != AR9170_ENC_ALG_NONE) {
if ((decrypt == AR9170_ENC_ALG_TKIP) &&
(error & AR9170_RX_ERROR_MMIC))
status->flag |= RX_FLAG_MMIC_ERROR;
status->flag |= RX_FLAG_DECRYPTED;
}
if (error & AR9170_RX_ERROR_DECRYPT && !ar->sniffer_enabled)
return -ENODATA;
error &= ~(AR9170_RX_ERROR_MMIC |
AR9170_RX_ERROR_FCS |
AR9170_RX_ERROR_WRONG_RA |
AR9170_RX_ERROR_DECRYPT |
AR9170_RX_ERROR_PLCP);
/* drop any other error frames */
if (unlikely(error)) {
/* TODO: update netdevice's RX dropped/errors statistics */
if (net_ratelimit())
wiphy_dbg(ar->hw->wiphy, "received frame with "
"suspicious error code (%#x).\n", error);
return -EINVAL;
}
chan = ar->channel;
if (chan) {
status->band = chan->band;
status->freq = chan->center_freq;
}
switch (mac->status & AR9170_RX_STATUS_MODULATION) {
case AR9170_RX_STATUS_MODULATION_CCK:
if (mac->status & AR9170_RX_STATUS_SHORT_PREAMBLE)
status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
switch (head->plcp[0]) {
case AR9170_RX_PHY_RATE_CCK_1M:
status->rate_idx = 0;
break;
case AR9170_RX_PHY_RATE_CCK_2M:
status->rate_idx = 1;
break;
case AR9170_RX_PHY_RATE_CCK_5M:
status->rate_idx = 2;
break;
case AR9170_RX_PHY_RATE_CCK_11M:
status->rate_idx = 3;
break;
default:
if (net_ratelimit()) {
wiphy_err(ar->hw->wiphy, "invalid plcp cck "
"rate (%x).\n", head->plcp[0]);
}
return -EINVAL;
}
break;
case AR9170_RX_STATUS_MODULATION_DUPOFDM:
case AR9170_RX_STATUS_MODULATION_OFDM:
switch (head->plcp[0] & 0xf) {
case AR9170_TXRX_PHY_RATE_OFDM_6M:
status->rate_idx = 0;
break;
case AR9170_TXRX_PHY_RATE_OFDM_9M:
status->rate_idx = 1;
break;
case AR9170_TXRX_PHY_RATE_OFDM_12M:
status->rate_idx = 2;
break;
case AR9170_TXRX_PHY_RATE_OFDM_18M:
status->rate_idx = 3;
break;
case AR9170_TXRX_PHY_RATE_OFDM_24M:
status->rate_idx = 4;
break;
case AR9170_TXRX_PHY_RATE_OFDM_36M:
status->rate_idx = 5;
break;
case AR9170_TXRX_PHY_RATE_OFDM_48M:
status->rate_idx = 6;
break;
case AR9170_TXRX_PHY_RATE_OFDM_54M:
status->rate_idx = 7;
break;
default:
if (net_ratelimit()) {
wiphy_err(ar->hw->wiphy, "invalid plcp ofdm "
"rate (%x).\n", head->plcp[0]);
}
return -EINVAL;
}
if (status->band == NL80211_BAND_2GHZ)
status->rate_idx += 4;
break;
case AR9170_RX_STATUS_MODULATION_HT:
if (head->plcp[3] & 0x80)
status->bw = RATE_INFO_BW_40;
if (head->plcp[6] & 0x80)
status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
status->rate_idx = clamp(head->plcp[3] & 0x7f, 0, 75);
status->encoding = RX_ENC_HT;
break;
default:
BUG();
return -ENOSYS;
}
return 0;
}
static void carl9170_rx_phy_status(struct ar9170 *ar,
struct ar9170_rx_phystatus *phy, struct ieee80211_rx_status *status)
{
int i;
BUILD_BUG_ON(sizeof(struct ar9170_rx_phystatus) != 20);
for (i = 0; i < 3; i++)
if (phy->rssi[i] != 0x80)
status->antenna |= BIT(i);
/* post-process RSSI */
for (i = 0; i < 7; i++)
if (phy->rssi[i] & 0x80)
phy->rssi[i] = ((~phy->rssi[i] & 0x7f) + 1) & 0x7f;
/* TODO: we could do something with phy_errors */
status->signal = ar->noise[0] + phy->rssi_combined;
}
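/* RSSI fixup sketch: a raw value with bit 7 set is treated as a negative
 * two's-complement byte and folded back to its 7-bit magnitude, e.g. 0xe6
 * (-26) becomes ((~0xe6 & 0x7f) + 1) & 0x7f = 0x1a (26). (Input value
 * illustrative.)
 */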
static struct sk_buff *carl9170_rx_copy_data(u8 *buf, int len)
{
struct sk_buff *skb;
int reserved = 0;
struct ieee80211_hdr *hdr = (void *) buf;
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *qc = ieee80211_get_qos_ctl(hdr);
reserved += NET_IP_ALIGN;
if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
reserved += NET_IP_ALIGN;
}
if (ieee80211_has_a4(hdr->frame_control))
reserved += NET_IP_ALIGN;
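	/*
	 * Reserve 32 bytes of headroom, plus (presumably) a NET_IP_ALIGN
	 * pad whenever the variable 802.11 header parts would otherwise
	 * leave the payload misaligned.
	 */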
reserved = 32 + (reserved & NET_IP_ALIGN);
skb = dev_alloc_skb(len + reserved);
if (likely(skb)) {
skb_reserve(skb, reserved);
skb_put_data(skb, buf, len);
}
return skb;
}
static u8 *carl9170_find_ie(u8 *data, unsigned int len, u8 ie)
{
struct ieee80211_mgmt *mgmt = (void *)data;
u8 *pos, *end;
pos = (u8 *)mgmt->u.beacon.variable;
end = data + len;
while (pos < end) {
if (pos + 2 + pos[1] > end)
return NULL;
if (pos[0] == ie)
return pos;
pos += 2 + pos[1];
}
return NULL;
}
/*
* NOTE:
*
* The firmware is in charge of waking up the device just before
* the AP is expected to transmit the next beacon.
*
* This leaves the driver with the important task of deciding when
* to set the PHY back to bed again.
*/
static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
{
struct ieee80211_hdr *hdr = data;
struct ieee80211_tim_ie *tim_ie;
struct ath_common *common = &ar->common;
u8 *tim;
u8 tim_len;
bool cam;
if (likely(!(ar->hw->conf.flags & IEEE80211_CONF_PS)))
return;
/* min. beacon length + FCS_LEN */
if (len <= 40 + FCS_LEN)
return;
/* check if this really is a beacon */
/* and only beacons from the associated BSSID, please */
if (!ath_is_mybeacon(common, hdr) || !common->curaid)
return;
ar->ps.last_beacon = jiffies;
tim = carl9170_find_ie(data, len - FCS_LEN, WLAN_EID_TIM);
if (!tim)
return;
if (tim[1] < sizeof(*tim_ie))
return;
tim_len = tim[1];
tim_ie = (struct ieee80211_tim_ie *) &tim[2];
if (!WARN_ON_ONCE(!ar->hw->conf.ps_dtim_period))
ar->ps.dtim_counter = (tim_ie->dtim_count - 1) %
ar->hw->conf.ps_dtim_period;
	/* Check whether the PHY can be turned off again. */
/* 1. What about buffered unicast traffic for our AID? */
cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid);
/* 2. Maybe the AP wants to send multicast/broadcast data? */
cam |= !!(tim_ie->bitmap_ctrl & 0x01);
if (!cam) {
/* back to low-power land. */
ar->ps.off_override &= ~PS_OFF_BCN;
carl9170_ps_check(ar);
} else {
/* force CAM */
ar->ps.off_override |= PS_OFF_BCN;
}
}
static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
{
struct ieee80211_bar *bar = data;
struct carl9170_bar_list_entry *entry;
unsigned int queue;
if (likely(!ieee80211_is_back(bar->frame_control)))
return;
if (len <= sizeof(*bar) + FCS_LEN)
return;
queue = TID_TO_WME_AC(((le16_to_cpu(bar->control) &
IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
IEEE80211_BAR_CTRL_TID_INFO_SHIFT) & 7);
rcu_read_lock();
list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) {
struct sk_buff *entry_skb = entry->skb;
struct _carl9170_tx_superframe *super = (void *)entry_skb->data;
struct ieee80211_bar *entry_bar = (void *)super->frame_data;
#define TID_CHECK(a, b) ( \
((a) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK)) == \
	((b) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK)))
if (bar->start_seq_num == entry_bar->start_seq_num &&
TID_CHECK(bar->control, entry_bar->control) &&
ether_addr_equal_64bits(bar->ra, entry_bar->ta) &&
ether_addr_equal_64bits(bar->ta, entry_bar->ra)) {
struct ieee80211_tx_info *tx_info;
tx_info = IEEE80211_SKB_CB(entry_skb);
tx_info->flags |= IEEE80211_TX_STAT_ACK;
spin_lock_bh(&ar->bar_list_lock[queue]);
list_del_rcu(&entry->list);
spin_unlock_bh(&ar->bar_list_lock[queue]);
kfree_rcu(entry, head);
break;
}
}
rcu_read_unlock();
#undef TID_CHECK
}
static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms,
struct ieee80211_rx_status *rx_status)
{
__le16 fc;
if ((ms & AR9170_RX_STATUS_MPDU) == AR9170_RX_STATUS_MPDU_SINGLE) {
/*
		 * This frame is not part of an A-MPDU.
		 * Therefore it is not subject to any
* of the following content restrictions.
*/
return true;
}
rx_status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
rx_status->ampdu_reference = ar->ampdu_ref;
/*
* "802.11n - 7.4a.3 A-MPDU contents" describes in which contexts
	 * certain frame types can be part of an A-MPDU.
*
* In order to keep the processing cost down, I opted for a
* stateless filter solely based on the frame control field.
*/
fc = ((struct ieee80211_hdr *)buf)->frame_control;
if (ieee80211_is_data_qos(fc) && ieee80211_is_data_present(fc))
return true;
if (ieee80211_is_ack(fc) || ieee80211_is_back(fc) ||
ieee80211_is_back_req(fc))
return true;
if (ieee80211_is_action(fc))
return true;
return false;
}
static int carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len,
struct ieee80211_rx_status *status)
{
struct sk_buff *skb;
/* (driver) frame trap handler
*
	 * Because power-saving mode handling has to be implemented by
	 * the driver/firmware, we have to check each incoming beacon
	 * from the associated AP. If there's new data for us (either
	 * broadcast/multicast or unicast), we have to react quickly.
	 *
	 * So, if you want to add additional frame trap
	 * handlers, this would be the perfect place!
*/
carl9170_ps_beacon(ar, buf, len);
carl9170_ba_check(ar, buf, len);
skb = carl9170_rx_copy_data(buf, len);
if (!skb)
return -ENOMEM;
memcpy(IEEE80211_SKB_RXCB(skb), status, sizeof(*status));
ieee80211_rx(ar->hw, skb);
return 0;
}
/*
* If the frame alignment is right (or the kernel has
* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there
* is only a single MPDU in the USB frame, then we could
* submit to mac80211 the SKB directly. However, since
* there may be multiple packets in one SKB in stream
* mode, and we need to observe the proper ordering,
* this is non-trivial.
*/
static void carl9170_rx_untie_data(struct ar9170 *ar, u8 *buf, int len)
{
struct ar9170_rx_head *head;
struct ar9170_rx_macstatus *mac;
struct ar9170_rx_phystatus *phy = NULL;
struct ieee80211_rx_status status;
int mpdu_len;
u8 mac_status;
if (!IS_STARTED(ar))
return;
if (unlikely(len < sizeof(*mac)))
goto drop;
memset(&status, 0, sizeof(status));
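	/* the MAC status trailer sits at the very end of the buffer */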
mpdu_len = len - sizeof(*mac);
mac = (void *)(buf + mpdu_len);
mac_status = mac->status;
switch (mac_status & AR9170_RX_STATUS_MPDU) {
case AR9170_RX_STATUS_MPDU_FIRST:
ar->ampdu_ref++;
		/* Aggregated MPDUs start with a PLCP header */
if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) {
head = (void *) buf;
/*
* The PLCP header needs to be cached for the
* following MIDDLE + LAST A-MPDU packets.
*
* So, if you are wondering why all frames seem
* to share a common RX status information,
* then you have the answer right here...
*/
memcpy(&ar->rx_plcp, (void *) buf,
sizeof(struct ar9170_rx_head));
mpdu_len -= sizeof(struct ar9170_rx_head);
buf += sizeof(struct ar9170_rx_head);
ar->rx_has_plcp = true;
} else {
if (net_ratelimit()) {
wiphy_err(ar->hw->wiphy, "plcp info "
"is clipped.\n");
}
goto drop;
}
break;
case AR9170_RX_STATUS_MPDU_LAST:
status.flag |= RX_FLAG_AMPDU_IS_LAST;
/*
		 * The last frame of an A-MPDU carries an extra tail
		 * that contains the PHY status of the whole
		 * aggregate.
*/
if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) {
mpdu_len -= sizeof(struct ar9170_rx_phystatus);
phy = (void *)(buf + mpdu_len);
} else {
if (net_ratelimit()) {
wiphy_err(ar->hw->wiphy, "frame tail "
"is clipped.\n");
}
goto drop;
}
fallthrough;
case AR9170_RX_STATUS_MPDU_MIDDLE:
/* These are just data + mac status */
if (unlikely(!ar->rx_has_plcp)) {
if (!net_ratelimit())
return;
wiphy_err(ar->hw->wiphy, "rx stream does not start "
"with a first_mpdu frame tag.\n");
goto drop;
}
head = &ar->rx_plcp;
break;
case AR9170_RX_STATUS_MPDU_SINGLE:
/* single mpdu has both: plcp (head) and phy status (tail) */
head = (void *) buf;
mpdu_len -= sizeof(struct ar9170_rx_head);
mpdu_len -= sizeof(struct ar9170_rx_phystatus);
buf += sizeof(struct ar9170_rx_head);
phy = (void *)(buf + mpdu_len);
break;
default:
BUG();
break;
}
	/* frame control (2) + duration (2) + RA + FCS */
if (unlikely(mpdu_len < (2 + 2 + ETH_ALEN + FCS_LEN)))
goto drop;
if (unlikely(carl9170_rx_mac_status(ar, head, mac, &status)))
goto drop;
if (!carl9170_ampdu_check(ar, buf, mac_status, &status))
goto drop;
if (phy)
carl9170_rx_phy_status(ar, phy, &status);
else
status.flag |= RX_FLAG_NO_SIGNAL_VAL;
if (carl9170_handle_mpdu(ar, buf, mpdu_len, &status))
goto drop;
return;
drop:
ar->rx_dropped++;
}
static void carl9170_rx_untie_cmds(struct ar9170 *ar, const u8 *respbuf,
const unsigned int resplen)
{
struct carl9170_rsp *cmd;
int i = 0;
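	/* each response is a 4-byte command header followed by
	 * hdr.len bytes of payload */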
while (i < resplen) {
cmd = (void *) &respbuf[i];
i += cmd->hdr.len + 4;
if (unlikely(i > resplen))
break;
if (carl9170_check_sequence(ar, cmd->hdr.seq))
break;
carl9170_handle_command_response(ar, cmd, cmd->hdr.len + 4);
}
if (unlikely(i != resplen)) {
if (!net_ratelimit())
return;
wiphy_err(ar->hw->wiphy, "malformed firmware trap:\n");
print_hex_dump_bytes("rxcmd:", DUMP_PREFIX_OFFSET,
respbuf, resplen);
}
}
static void __carl9170_rx(struct ar9170 *ar, u8 *buf, unsigned int len)
{
unsigned int i = 0;
/* weird thing, but this is the same in the original driver */
while (len > 2 && i < 12 && buf[0] == 0xff && buf[1] == 0xff) {
i += 2;
len -= 2;
buf += 2;
}
if (unlikely(len < 4))
return;
/* found the 6 * 0xffff marker? */
if (i == 12)
carl9170_rx_untie_cmds(ar, buf, len);
else
carl9170_rx_untie_data(ar, buf, len);
}
static void carl9170_rx_stream(struct ar9170 *ar, void *buf, unsigned int len)
{
unsigned int tlen, wlen = 0, clen = 0;
struct ar9170_stream *rx_stream;
u8 *tbuf;
tbuf = buf;
tlen = len;
while (tlen >= 4) {
rx_stream = (void *) tbuf;
clen = le16_to_cpu(rx_stream->length);
wlen = ALIGN(clen, 4);
		/* check if this stream has a valid tag. */
if (rx_stream->tag != cpu_to_le16(AR9170_RX_STREAM_TAG)) {
/*
* TODO: handle the highly unlikely event that the
* corrupted stream has the TAG at the right position.
*/
/* check if the frame can be repaired. */
if (!ar->rx_failover_missing) {
				/* this is not a "short read". */
if (net_ratelimit()) {
wiphy_err(ar->hw->wiphy,
"missing tag!\n");
}
__carl9170_rx(ar, tbuf, tlen);
return;
}
if (ar->rx_failover_missing > tlen) {
if (net_ratelimit()) {
wiphy_err(ar->hw->wiphy,
"possible multi "
"stream corruption!\n");
goto err_telluser;
} else {
goto err_silent;
}
}
skb_put_data(ar->rx_failover, tbuf, tlen);
ar->rx_failover_missing -= tlen;
if (ar->rx_failover_missing <= 0) {
/*
* nested carl9170_rx_stream call!
*
* termination is guaranteed, even when the
				 * combined frame also has an element with
* a bad tag.
*/
ar->rx_failover_missing = 0;
carl9170_rx_stream(ar, ar->rx_failover->data,
ar->rx_failover->len);
skb_reset_tail_pointer(ar->rx_failover);
skb_trim(ar->rx_failover, 0);
}
return;
}
/* check if stream is clipped */
if (wlen > tlen - 4) {
if (ar->rx_failover_missing) {
/* TODO: handle double stream corruption. */
if (net_ratelimit()) {
wiphy_err(ar->hw->wiphy, "double rx "
"stream corruption!\n");
goto err_telluser;
} else {
goto err_silent;
}
}
/*
			 * Save the incomplete data set.
			 * The firmware will resend the missing bits when
			 * the rx descriptor comes round again.
*/
skb_put_data(ar->rx_failover, tbuf, tlen);
ar->rx_failover_missing = clen - tlen;
return;
}
__carl9170_rx(ar, rx_stream->payload, clen);
tbuf += wlen + 4;
tlen -= wlen + 4;
}
if (tlen) {
if (net_ratelimit()) {
wiphy_err(ar->hw->wiphy, "%d bytes of unprocessed "
"data left in rx stream!\n", tlen);
}
goto err_telluser;
}
return;
err_telluser:
wiphy_err(ar->hw->wiphy, "damaged RX stream data [want:%d, "
"data:%d, rx:%d, pending:%d ]\n", clen, wlen, tlen,
ar->rx_failover_missing);
if (ar->rx_failover_missing)
print_hex_dump_bytes("rxbuf:", DUMP_PREFIX_OFFSET,
ar->rx_failover->data,
ar->rx_failover->len);
print_hex_dump_bytes("stream:", DUMP_PREFIX_OFFSET,
buf, len);
wiphy_err(ar->hw->wiphy, "please check your hardware and cables, if "
"you see this message frequently.\n");
err_silent:
if (ar->rx_failover_missing) {
skb_reset_tail_pointer(ar->rx_failover);
skb_trim(ar->rx_failover, 0);
ar->rx_failover_missing = 0;
}
}
void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len)
{
if (ar->fw.rx_stream)
carl9170_rx_stream(ar, buf, len);
else
__carl9170_rx(ar, buf, len);
}
|
linux-master
|
drivers/net/wireless/ath/carl9170/rx.c
|
/*
* Atheros CARL9170 driver
*
* USB - frontend
*
* Copyright 2008, Johannes Berg <[email protected]>
* Copyright 2009, 2010, Christian Lamparter <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, see
* http://www.gnu.org/licenses/.
*
* This file incorporates work covered by the following copyright and
* permission notice:
* Copyright (c) 2007-2008 Atheros Communications, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/device.h>
#include <net/mac80211.h>
#include "carl9170.h"
#include "cmd.h"
#include "hw.h"
#include "fwcmd.h"
MODULE_AUTHOR("Johannes Berg <[email protected]>");
MODULE_AUTHOR("Christian Lamparter <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Atheros AR9170 802.11n USB wireless");
MODULE_FIRMWARE(CARL9170FW_NAME);
MODULE_ALIAS("ar9170usb");
MODULE_ALIAS("arusb_lnx");
/*
* Note:
*
* Always update our wiki's device list (located at:
* https://wireless.wiki.kernel.org/en/users/Drivers/ar9170/devices ),
* whenever you add a new device.
*/
static const struct usb_device_id carl9170_usb_ids[] = {
/* Atheros 9170 */
{ USB_DEVICE(0x0cf3, 0x9170) },
/* Atheros TG121N */
{ USB_DEVICE(0x0cf3, 0x1001) },
/* TP-Link TL-WN821N v2 */
{ USB_DEVICE(0x0cf3, 0x1002), .driver_info = CARL9170_WPS_BUTTON |
CARL9170_ONE_LED },
/* 3Com Dual Band 802.11n USB Adapter */
{ USB_DEVICE(0x0cf3, 0x1010) },
/* H3C Dual Band 802.11n USB Adapter */
{ USB_DEVICE(0x0cf3, 0x1011) },
/* Cace Airpcap NX */
{ USB_DEVICE(0xcace, 0x0300) },
/* D-Link DWA 160 A1 */
{ USB_DEVICE(0x07d1, 0x3c10) },
/* D-Link DWA 160 A2 */
{ USB_DEVICE(0x07d1, 0x3a09) },
/* D-Link DWA 130 D */
{ USB_DEVICE(0x07d1, 0x3a0f) },
/* Netgear WNA1000 */
{ USB_DEVICE(0x0846, 0x9040) },
/* Netgear WNDA3100 (v1) */
{ USB_DEVICE(0x0846, 0x9010) },
/* Netgear WN111 v2 */
{ USB_DEVICE(0x0846, 0x9001), .driver_info = CARL9170_ONE_LED },
/* Zydas ZD1221 */
{ USB_DEVICE(0x0ace, 0x1221) },
/* Proxim ORiNOCO 802.11n USB */
{ USB_DEVICE(0x1435, 0x0804) },
/* WNC Generic 11n USB Dongle */
{ USB_DEVICE(0x1435, 0x0326) },
/* ZyXEL NWD271N */
{ USB_DEVICE(0x0586, 0x3417) },
/* Z-Com UB81 BG */
{ USB_DEVICE(0x0cde, 0x0023) },
/* Z-Com UB82 ABG */
{ USB_DEVICE(0x0cde, 0x0026) },
/* Sphairon Homelink 1202 */
{ USB_DEVICE(0x0cde, 0x0027) },
/* Arcadyan WN7512 */
{ USB_DEVICE(0x083a, 0xf522) },
/* Planex GWUS300 */
{ USB_DEVICE(0x2019, 0x5304) },
/* IO-Data WNGDNUS2 */
{ USB_DEVICE(0x04bb, 0x093f) },
/* NEC WL300NU-G */
{ USB_DEVICE(0x0409, 0x0249) },
/* NEC WL300NU-AG */
{ USB_DEVICE(0x0409, 0x02b4) },
/* AVM FRITZ!WLAN USB Stick N */
{ USB_DEVICE(0x057c, 0x8401) },
/* AVM FRITZ!WLAN USB Stick N 2.4 */
{ USB_DEVICE(0x057c, 0x8402) },
/* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
{ USB_DEVICE(0x1668, 0x1200) },
/* Airlive X.USB a/b/g/n */
{ USB_DEVICE(0x1b75, 0x9170) },
/* terminate */
{}
};
MODULE_DEVICE_TABLE(usb, carl9170_usb_ids);
static struct usb_driver carl9170_driver;
static void carl9170_usb_submit_data_urb(struct ar9170 *ar)
{
struct urb *urb;
int err;
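	/*
	 * Claim a tx slot up front; if all AR9170_NUM_TX_URBS slots are
	 * taken, back off and let a completing urb resubmit the next one.
	 */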
if (atomic_inc_return(&ar->tx_anch_urbs) > AR9170_NUM_TX_URBS)
goto err_acc;
urb = usb_get_from_anchor(&ar->tx_wait);
if (!urb)
goto err_acc;
usb_anchor_urb(urb, &ar->tx_anch);
err = usb_submit_urb(urb, GFP_ATOMIC);
if (unlikely(err)) {
if (net_ratelimit()) {
dev_err(&ar->udev->dev, "tx submit failed (%d)\n",
urb->status);
}
usb_unanchor_urb(urb);
usb_anchor_urb(urb, &ar->tx_err);
}
usb_free_urb(urb);
if (likely(err == 0))
return;
err_acc:
atomic_dec(&ar->tx_anch_urbs);
}
static void carl9170_usb_tx_data_complete(struct urb *urb)
{
struct ar9170 *ar = usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
if (WARN_ON_ONCE(!ar)) {
dev_kfree_skb_irq(urb->context);
return;
}
atomic_dec(&ar->tx_anch_urbs);
switch (urb->status) {
/* everything is fine */
case 0:
carl9170_tx_callback(ar, (void *)urb->context);
break;
/* disconnect */
case -ENOENT:
case -ECONNRESET:
case -ENODEV:
case -ESHUTDOWN:
/*
* Defer the frame clean-up to the tasklet worker.
	 * This is necessary because carl9170_tx_drop
* does not work in an irqsave context.
*/
usb_anchor_urb(urb, &ar->tx_err);
return;
/* a random transmission error has occurred? */
default:
if (net_ratelimit()) {
dev_err(&ar->udev->dev, "tx failed (%d)\n",
urb->status);
}
usb_anchor_urb(urb, &ar->tx_err);
break;
}
if (likely(IS_STARTED(ar)))
carl9170_usb_submit_data_urb(ar);
}
static int carl9170_usb_submit_cmd_urb(struct ar9170 *ar)
{
struct urb *urb;
int err;
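	/*
	 * Allow only a single command urb in flight; the completion
	 * handler takes care of submitting the next queued command.
	 */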
if (atomic_inc_return(&ar->tx_cmd_urbs) != 1) {
atomic_dec(&ar->tx_cmd_urbs);
return 0;
}
urb = usb_get_from_anchor(&ar->tx_cmd);
if (!urb) {
atomic_dec(&ar->tx_cmd_urbs);
return 0;
}
usb_anchor_urb(urb, &ar->tx_anch);
err = usb_submit_urb(urb, GFP_ATOMIC);
if (unlikely(err)) {
usb_unanchor_urb(urb);
atomic_dec(&ar->tx_cmd_urbs);
}
usb_free_urb(urb);
return err;
}
static void carl9170_usb_cmd_complete(struct urb *urb)
{
struct ar9170 *ar = urb->context;
int err = 0;
if (WARN_ON_ONCE(!ar))
return;
atomic_dec(&ar->tx_cmd_urbs);
switch (urb->status) {
/* everything is fine */
case 0:
break;
/* disconnect */
case -ENOENT:
case -ECONNRESET:
case -ENODEV:
case -ESHUTDOWN:
return;
default:
err = urb->status;
break;
}
if (!IS_INITIALIZED(ar))
return;
if (err)
dev_err(&ar->udev->dev, "submit cmd cb failed (%d).\n", err);
err = carl9170_usb_submit_cmd_urb(ar);
if (err)
dev_err(&ar->udev->dev, "submit cmd failed (%d).\n", err);
}
static void carl9170_usb_rx_irq_complete(struct urb *urb)
{
struct ar9170 *ar = urb->context;
if (WARN_ON_ONCE(!ar))
return;
switch (urb->status) {
/* everything is fine */
case 0:
break;
/* disconnect */
case -ENOENT:
case -ECONNRESET:
case -ENODEV:
case -ESHUTDOWN:
return;
default:
goto resubmit;
}
/*
* While the carl9170 firmware does not use this EP, the
* firmware loader in the EEPROM unfortunately does.
* Therefore we need to be ready to handle out-of-band
* responses and traps in case the firmware crashed and
* the loader took over again.
*/
carl9170_handle_command_response(ar, urb->transfer_buffer,
urb->actual_length);
resubmit:
usb_anchor_urb(urb, &ar->rx_anch);
if (unlikely(usb_submit_urb(urb, GFP_ATOMIC)))
usb_unanchor_urb(urb);
}
static int carl9170_usb_submit_rx_urb(struct ar9170 *ar, gfp_t gfp)
{
struct urb *urb;
int err = 0, runs = 0;
while ((atomic_read(&ar->rx_anch_urbs) < AR9170_NUM_RX_URBS) &&
(runs++ < AR9170_NUM_RX_URBS)) {
err = -ENOSPC;
urb = usb_get_from_anchor(&ar->rx_pool);
if (urb) {
usb_anchor_urb(urb, &ar->rx_anch);
err = usb_submit_urb(urb, gfp);
if (unlikely(err)) {
usb_unanchor_urb(urb);
usb_anchor_urb(urb, &ar->rx_pool);
} else {
atomic_dec(&ar->rx_pool_urbs);
atomic_inc(&ar->rx_anch_urbs);
}
usb_free_urb(urb);
}
}
return err;
}
static void carl9170_usb_rx_work(struct ar9170 *ar)
{
struct urb *urb;
int i;
for (i = 0; i < AR9170_NUM_RX_URBS_POOL; i++) {
urb = usb_get_from_anchor(&ar->rx_work);
if (!urb)
break;
atomic_dec(&ar->rx_work_urbs);
if (IS_INITIALIZED(ar)) {
carl9170_rx(ar, urb->transfer_buffer,
urb->actual_length);
}
usb_anchor_urb(urb, &ar->rx_pool);
atomic_inc(&ar->rx_pool_urbs);
usb_free_urb(urb);
carl9170_usb_submit_rx_urb(ar, GFP_ATOMIC);
}
}
void carl9170_usb_handle_tx_err(struct ar9170 *ar)
{
struct urb *urb;
while ((urb = usb_get_from_anchor(&ar->tx_err))) {
struct sk_buff *skb = (void *)urb->context;
carl9170_tx_drop(ar, skb);
carl9170_tx_callback(ar, skb);
usb_free_urb(urb);
}
}
static void carl9170_usb_tasklet(struct tasklet_struct *t)
{
struct ar9170 *ar = from_tasklet(ar, t, usb_tasklet);
if (!IS_INITIALIZED(ar))
return;
carl9170_usb_rx_work(ar);
/*
* Strictly speaking: The tx scheduler is not part of the USB system.
* But the rx worker returns frames back to the mac80211-stack and
* this is the _perfect_ place to generate the next transmissions.
*/
if (IS_STARTED(ar))
carl9170_tx_scheduler(ar);
}
static void carl9170_usb_rx_complete(struct urb *urb)
{
struct ar9170 *ar = (struct ar9170 *)urb->context;
int err;
if (WARN_ON_ONCE(!ar))
return;
atomic_dec(&ar->rx_anch_urbs);
switch (urb->status) {
case 0:
/* rx path */
usb_anchor_urb(urb, &ar->rx_work);
atomic_inc(&ar->rx_work_urbs);
break;
case -ENOENT:
case -ECONNRESET:
case -ENODEV:
case -ESHUTDOWN:
/* handle disconnect events*/
return;
default:
/* handle all other errors */
usb_anchor_urb(urb, &ar->rx_pool);
atomic_inc(&ar->rx_pool_urbs);
break;
}
err = carl9170_usb_submit_rx_urb(ar, GFP_ATOMIC);
if (unlikely(err)) {
/*
* usb_submit_rx_urb reported a problem.
* In case this is due to a rx buffer shortage,
* elevate the tasklet worker priority to
* the highest available level.
*/
tasklet_hi_schedule(&ar->usb_tasklet);
if (atomic_read(&ar->rx_anch_urbs) == 0) {
/*
* The system is too slow to cope with
* the enormous workload. We have simply
* run out of active rx urbs and this
* unfortunately leads to an unpredictable
* device.
*/
ieee80211_queue_work(ar->hw, &ar->ping_work);
}
} else {
/*
* Using anything less than _high_ priority absolutely
	 * kills the rx performance on my UP system...
*/
tasklet_hi_schedule(&ar->usb_tasklet);
}
}
static struct urb *carl9170_usb_alloc_rx_urb(struct ar9170 *ar, gfp_t gfp)
{
struct urb *urb;
void *buf;
buf = kmalloc(ar->fw.rx_size, gfp);
if (!buf)
return NULL;
urb = usb_alloc_urb(0, gfp);
if (!urb) {
kfree(buf);
return NULL;
}
usb_fill_bulk_urb(urb, ar->udev, usb_rcvbulkpipe(ar->udev,
AR9170_USB_EP_RX), buf, ar->fw.rx_size,
carl9170_usb_rx_complete, ar);
urb->transfer_flags |= URB_FREE_BUFFER;
return urb;
}
static int carl9170_usb_send_rx_irq_urb(struct ar9170 *ar)
{
struct urb *urb = NULL;
void *ibuf;
int err = -ENOMEM;
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
goto out;
ibuf = kmalloc(AR9170_USB_EP_CTRL_MAX, GFP_KERNEL);
if (!ibuf)
goto out;
usb_fill_int_urb(urb, ar->udev, usb_rcvintpipe(ar->udev,
AR9170_USB_EP_IRQ), ibuf, AR9170_USB_EP_CTRL_MAX,
carl9170_usb_rx_irq_complete, ar, 1);
urb->transfer_flags |= URB_FREE_BUFFER;
usb_anchor_urb(urb, &ar->rx_anch);
err = usb_submit_urb(urb, GFP_KERNEL);
if (err)
usb_unanchor_urb(urb);
out:
usb_free_urb(urb);
return err;
}
static int carl9170_usb_init_rx_bulk_urbs(struct ar9170 *ar)
{
struct urb *urb;
int i, err = -EINVAL;
/*
* The driver actively maintains a second shadow
* pool for inactive, but fully-prepared rx urbs.
*
* The pool should help the driver to master huge
* workload spikes without running the risk of
* undersupplying the hardware or wasting time by
* processing rx data (streams) inside the urb
* completion (hardirq context).
*/
for (i = 0; i < AR9170_NUM_RX_URBS_POOL; i++) {
urb = carl9170_usb_alloc_rx_urb(ar, GFP_KERNEL);
if (!urb) {
err = -ENOMEM;
goto err_out;
}
usb_anchor_urb(urb, &ar->rx_pool);
atomic_inc(&ar->rx_pool_urbs);
usb_free_urb(urb);
}
err = carl9170_usb_submit_rx_urb(ar, GFP_KERNEL);
if (err)
goto err_out;
	/* the device is now waiting for the firmware. */
carl9170_set_state_when(ar, CARL9170_STOPPED, CARL9170_IDLE);
return 0;
err_out:
usb_scuttle_anchored_urbs(&ar->rx_pool);
usb_scuttle_anchored_urbs(&ar->rx_work);
usb_kill_anchored_urbs(&ar->rx_anch);
return err;
}
static int carl9170_usb_flush(struct ar9170 *ar)
{
struct urb *urb;
int ret, err = 0;
while ((urb = usb_get_from_anchor(&ar->tx_wait))) {
struct sk_buff *skb = (void *)urb->context;
carl9170_tx_drop(ar, skb);
carl9170_tx_callback(ar, skb);
usb_free_urb(urb);
}
ret = usb_wait_anchor_empty_timeout(&ar->tx_cmd, 1000);
if (ret == 0)
err = -ETIMEDOUT;
	/* let's wait a while until the tx queues have dried out */
ret = usb_wait_anchor_empty_timeout(&ar->tx_anch, 1000);
if (ret == 0)
err = -ETIMEDOUT;
usb_kill_anchored_urbs(&ar->tx_anch);
carl9170_usb_handle_tx_err(ar);
return err;
}
static void carl9170_usb_cancel_urbs(struct ar9170 *ar)
{
int err;
carl9170_set_state(ar, CARL9170_UNKNOWN_STATE);
err = carl9170_usb_flush(ar);
if (err)
dev_err(&ar->udev->dev, "stuck tx urbs!\n");
usb_poison_anchored_urbs(&ar->tx_anch);
carl9170_usb_handle_tx_err(ar);
usb_poison_anchored_urbs(&ar->rx_anch);
tasklet_kill(&ar->usb_tasklet);
usb_scuttle_anchored_urbs(&ar->rx_work);
usb_scuttle_anchored_urbs(&ar->rx_pool);
usb_scuttle_anchored_urbs(&ar->tx_cmd);
}
int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
const bool free_buf)
{
struct urb *urb;
int err = 0;
if (!IS_INITIALIZED(ar)) {
err = -EPERM;
goto err_free;
}
if (WARN_ON(cmd->hdr.len > CARL9170_MAX_CMD_LEN - 4)) {
err = -EINVAL;
goto err_free;
}
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
err = -ENOMEM;
goto err_free;
}
if (ar->usb_ep_cmd_is_bulk)
usb_fill_bulk_urb(urb, ar->udev,
usb_sndbulkpipe(ar->udev, AR9170_USB_EP_CMD),
cmd, cmd->hdr.len + 4,
carl9170_usb_cmd_complete, ar);
else
usb_fill_int_urb(urb, ar->udev,
usb_sndintpipe(ar->udev, AR9170_USB_EP_CMD),
cmd, cmd->hdr.len + 4,
carl9170_usb_cmd_complete, ar, 1);
if (free_buf)
urb->transfer_flags |= URB_FREE_BUFFER;
usb_anchor_urb(urb, &ar->tx_cmd);
usb_free_urb(urb);
return carl9170_usb_submit_cmd_urb(ar);
err_free:
if (free_buf)
kfree(cmd);
return err;
}
int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
unsigned int plen, void *payload, unsigned int outlen, void *out)
{
int err = -ENOMEM;
unsigned long time_left;
if (!IS_ACCEPTING_CMD(ar))
return -EIO;
if (!(cmd & CARL9170_CMD_ASYNC_FLAG))
might_sleep();
ar->cmd.hdr.len = plen;
ar->cmd.hdr.cmd = cmd;
/* writing multiple regs fills this buffer already */
if (plen && payload != (u8 *)(ar->cmd.data))
memcpy(ar->cmd.data, payload, plen);
spin_lock_bh(&ar->cmd_lock);
ar->readbuf = (u8 *)out;
ar->readlen = outlen;
spin_unlock_bh(&ar->cmd_lock);
reinit_completion(&ar->cmd_wait);
err = __carl9170_exec_cmd(ar, &ar->cmd, false);
if (!(cmd & CARL9170_CMD_ASYNC_FLAG)) {
time_left = wait_for_completion_timeout(&ar->cmd_wait, HZ);
if (time_left == 0) {
err = -ETIMEDOUT;
goto err_unbuf;
}
if (ar->readlen != outlen) {
err = -EMSGSIZE;
goto err_unbuf;
}
}
return 0;
err_unbuf:
	/* Maybe the device was removed while we were waiting? */
if (IS_STARTED(ar)) {
dev_err(&ar->udev->dev, "no command feedback "
"received (%d).\n", err);
		/* provide some potentially useful debug information */
print_hex_dump_bytes("carl9170 cmd: ", DUMP_PREFIX_NONE,
&ar->cmd, plen + 4);
carl9170_restart(ar, CARL9170_RR_COMMAND_TIMEOUT);
}
/* invalidate to avoid completing the next command prematurely */
spin_lock_bh(&ar->cmd_lock);
ar->readbuf = NULL;
ar->readlen = 0;
spin_unlock_bh(&ar->cmd_lock);
return err;
}
void carl9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb)
{
struct urb *urb;
struct ar9170_stream *tx_stream;
void *data;
unsigned int len;
if (!IS_STARTED(ar))
goto err_drop;
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb)
goto err_drop;
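	/*
	 * In tx stream mode, prepend the stream header in the headroom
	 * that was reserved via extra_tx_headroom during firmware parsing.
	 */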
if (ar->fw.tx_stream) {
tx_stream = (void *) (skb->data - sizeof(*tx_stream));
len = skb->len + sizeof(*tx_stream);
tx_stream->length = cpu_to_le16(len);
tx_stream->tag = cpu_to_le16(AR9170_TX_STREAM_TAG);
data = tx_stream;
} else {
data = skb->data;
len = skb->len;
}
usb_fill_bulk_urb(urb, ar->udev, usb_sndbulkpipe(ar->udev,
AR9170_USB_EP_TX), data, len,
carl9170_usb_tx_data_complete, skb);
urb->transfer_flags |= URB_ZERO_PACKET;
usb_anchor_urb(urb, &ar->tx_wait);
usb_free_urb(urb);
carl9170_usb_submit_data_urb(ar);
return;
err_drop:
carl9170_tx_drop(ar, skb);
carl9170_tx_callback(ar, skb);
}
static void carl9170_release_firmware(struct ar9170 *ar)
{
if (ar->fw.fw) {
release_firmware(ar->fw.fw);
memset(&ar->fw, 0, sizeof(ar->fw));
}
}
void carl9170_usb_stop(struct ar9170 *ar)
{
int ret;
carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STOPPED);
ret = carl9170_usb_flush(ar);
if (ret)
dev_err(&ar->udev->dev, "kill pending tx urbs.\n");
usb_poison_anchored_urbs(&ar->tx_anch);
carl9170_usb_handle_tx_err(ar);
/* kill any pending command */
spin_lock_bh(&ar->cmd_lock);
ar->readlen = 0;
spin_unlock_bh(&ar->cmd_lock);
complete(&ar->cmd_wait);
/*
* Note:
	 * So far we have freed all tx urbs, but we don't dare touch any rx urbs.
	 * Else we would end up with an unresponsive device...
*/
}
int carl9170_usb_open(struct ar9170 *ar)
{
usb_unpoison_anchored_urbs(&ar->tx_anch);
carl9170_set_state_when(ar, CARL9170_STOPPED, CARL9170_IDLE);
return 0;
}
static int carl9170_usb_load_firmware(struct ar9170 *ar)
{
const u8 *data;
u8 *buf;
unsigned int transfer;
size_t len;
u32 addr;
int err = 0;
buf = kmalloc(4096, GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto err_out;
}
data = ar->fw.fw->data;
len = ar->fw.fw->size;
addr = ar->fw.address;
/* this removes the miniboot image */
data += ar->fw.offset;
len -= ar->fw.offset;
while (len) {
transfer = min_t(unsigned int, len, 4096u);
memcpy(buf, data, transfer);
err = usb_control_msg(ar->udev, usb_sndctrlpipe(ar->udev, 0),
0x30 /* FW DL */, 0x40 | USB_DIR_OUT,
addr >> 8, 0, buf, transfer, 100);
if (err < 0) {
kfree(buf);
goto err_out;
}
len -= transfer;
data += transfer;
addr += transfer;
}
kfree(buf);
err = usb_control_msg(ar->udev, usb_sndctrlpipe(ar->udev, 0),
0x31 /* FW DL COMPLETE */,
0x40 | USB_DIR_OUT, 0, 0, NULL, 0, 200);
if (wait_for_completion_timeout(&ar->fw_boot_wait, HZ) == 0) {
err = -ETIMEDOUT;
goto err_out;
}
err = carl9170_echo_test(ar, 0x4a110123);
if (err)
goto err_out;
/* now, start the command response counter */
ar->cmd_seq = -1;
return 0;
err_out:
dev_err(&ar->udev->dev, "firmware upload failed (%d).\n", err);
return err;
}
int carl9170_usb_restart(struct ar9170 *ar)
{
int err = 0;
if (ar->intf->condition != USB_INTERFACE_BOUND)
return 0;
/*
* Disable the command response sequence counter check.
* We already know that the device/firmware is in a bad state.
* So, no extra points are awarded to anyone who reminds the
* driver about that.
*/
ar->cmd_seq = -2;
err = carl9170_reboot(ar);
carl9170_usb_stop(ar);
if (err)
goto err_out;
tasklet_schedule(&ar->usb_tasklet);
/* The reboot procedure can take quite a while to complete. */
msleep(1100);
err = carl9170_usb_open(ar);
if (err)
goto err_out;
err = carl9170_usb_load_firmware(ar);
if (err)
goto err_out;
return 0;
err_out:
carl9170_usb_cancel_urbs(ar);
return err;
}
void carl9170_usb_reset(struct ar9170 *ar)
{
/*
* This is the last resort to get the device going again
* without any *user replugging action*.
*
* But there is a catch: usb_reset really is like a physical
* *reconnect*. The mac80211 state will be lost in the process.
* Therefore a userspace application, which is monitoring
* the link must step in.
*/
carl9170_usb_cancel_urbs(ar);
carl9170_usb_stop(ar);
usb_queue_reset_device(ar->intf);
}
static int carl9170_usb_init_device(struct ar9170 *ar)
{
int err;
/*
	 * The carl9170 firmware lets the driver know when it's
* ready for action. But we have to be prepared to gracefully
* handle all spurious [flushed] messages after each (re-)boot.
* Thus the command response counter remains disabled until it
* can be safely synchronized.
*/
ar->cmd_seq = -2;
err = carl9170_usb_send_rx_irq_urb(ar);
if (err)
goto err_out;
err = carl9170_usb_init_rx_bulk_urbs(ar);
if (err)
goto err_unrx;
err = carl9170_usb_open(ar);
if (err)
goto err_unrx;
mutex_lock(&ar->mutex);
err = carl9170_usb_load_firmware(ar);
mutex_unlock(&ar->mutex);
if (err)
goto err_stop;
return 0;
err_stop:
carl9170_usb_stop(ar);
err_unrx:
carl9170_usb_cancel_urbs(ar);
err_out:
return err;
}
static void carl9170_usb_firmware_failed(struct ar9170 *ar)
{
	/* Store copies of the usb_interface and usb_device pointers locally.
* This is because release_driver initiates carl9170_usb_disconnect,
* which in turn frees our driver context (ar).
*/
struct usb_interface *intf = ar->intf;
struct usb_device *udev = ar->udev;
complete(&ar->fw_load_wait);
/* at this point 'ar' could be already freed. Don't use it anymore */
ar = NULL;
/* unbind anything failed */
usb_lock_device(udev);
usb_driver_release_interface(&carl9170_driver, intf);
usb_unlock_device(udev);
usb_put_intf(intf);
}
static void carl9170_usb_firmware_finish(struct ar9170 *ar)
{
struct usb_interface *intf = ar->intf;
int err;
err = carl9170_parse_firmware(ar);
if (err)
goto err_freefw;
err = carl9170_usb_init_device(ar);
if (err)
goto err_freefw;
err = carl9170_register(ar);
carl9170_usb_stop(ar);
if (err)
goto err_unrx;
complete(&ar->fw_load_wait);
usb_put_intf(intf);
return;
err_unrx:
carl9170_usb_cancel_urbs(ar);
err_freefw:
carl9170_release_firmware(ar);
carl9170_usb_firmware_failed(ar);
}
static void carl9170_usb_firmware_step2(const struct firmware *fw,
void *context)
{
struct ar9170 *ar = context;
if (fw) {
ar->fw.fw = fw;
carl9170_usb_firmware_finish(ar);
return;
}
dev_err(&ar->udev->dev, "firmware not found.\n");
carl9170_usb_firmware_failed(ar);
}
static int carl9170_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_endpoint_descriptor *ep;
struct ar9170 *ar;
struct usb_device *udev;
int i, err;
err = usb_reset_device(interface_to_usbdev(intf));
if (err)
return err;
ar = carl9170_alloc(sizeof(*ar));
if (IS_ERR(ar))
return PTR_ERR(ar);
udev = interface_to_usbdev(intf);
ar->udev = udev;
ar->intf = intf;
ar->features = id->driver_info;
/* We need to remember the type of endpoint 4 because it differs
* between high- and full-speed configuration. The high-speed
* configuration specifies it as interrupt and the full-speed
* configuration as bulk endpoint. This information is required
* later when sending urbs to that endpoint.
*/
for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; ++i) {
ep = &intf->cur_altsetting->endpoint[i].desc;
if (usb_endpoint_num(ep) == AR9170_USB_EP_CMD &&
usb_endpoint_dir_out(ep) &&
usb_endpoint_type(ep) == USB_ENDPOINT_XFER_BULK)
ar->usb_ep_cmd_is_bulk = true;
}
usb_set_intfdata(intf, ar);
SET_IEEE80211_DEV(ar->hw, &intf->dev);
init_usb_anchor(&ar->rx_anch);
init_usb_anchor(&ar->rx_pool);
init_usb_anchor(&ar->rx_work);
init_usb_anchor(&ar->tx_wait);
init_usb_anchor(&ar->tx_anch);
init_usb_anchor(&ar->tx_cmd);
init_usb_anchor(&ar->tx_err);
init_completion(&ar->cmd_wait);
init_completion(&ar->fw_boot_wait);
init_completion(&ar->fw_load_wait);
tasklet_setup(&ar->usb_tasklet, carl9170_usb_tasklet);
atomic_set(&ar->tx_cmd_urbs, 0);
atomic_set(&ar->tx_anch_urbs, 0);
atomic_set(&ar->rx_work_urbs, 0);
atomic_set(&ar->rx_anch_urbs, 0);
atomic_set(&ar->rx_pool_urbs, 0);
usb_get_intf(intf);
carl9170_set_state(ar, CARL9170_STOPPED);
err = request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME,
&ar->udev->dev, GFP_KERNEL, ar, carl9170_usb_firmware_step2);
if (err) {
usb_put_intf(intf);
carl9170_free(ar);
}
return err;
}
static void carl9170_usb_disconnect(struct usb_interface *intf)
{
struct ar9170 *ar = usb_get_intfdata(intf);
if (WARN_ON(!ar))
return;
wait_for_completion(&ar->fw_load_wait);
if (IS_INITIALIZED(ar)) {
carl9170_reboot(ar);
carl9170_usb_stop(ar);
}
carl9170_usb_cancel_urbs(ar);
carl9170_unregister(ar);
usb_set_intfdata(intf, NULL);
carl9170_release_firmware(ar);
carl9170_free(ar);
}
#ifdef CONFIG_PM
static int carl9170_usb_suspend(struct usb_interface *intf,
pm_message_t message)
{
struct ar9170 *ar = usb_get_intfdata(intf);
if (!ar)
return -ENODEV;
carl9170_usb_cancel_urbs(ar);
return 0;
}
static int carl9170_usb_resume(struct usb_interface *intf)
{
struct ar9170 *ar = usb_get_intfdata(intf);
int err;
if (!ar)
return -ENODEV;
usb_unpoison_anchored_urbs(&ar->rx_anch);
carl9170_set_state(ar, CARL9170_STOPPED);
/*
* The USB documentation demands that [for suspend] all traffic
* to and from the device has to stop. This would be fine, but
	 * there's a catch: the device [usb phy] does not come back.
*
* Upon resume the firmware will "kill" itself and the
* boot-code sorts out the magic voodoo.
	 * Not very nice, but there's not much that could go wrong.
*/
msleep(1100);
err = carl9170_usb_init_device(ar);
if (err)
goto err_unrx;
return 0;
err_unrx:
carl9170_usb_cancel_urbs(ar);
return err;
}
#endif /* CONFIG_PM */
static struct usb_driver carl9170_driver = {
.name = KBUILD_MODNAME,
.probe = carl9170_usb_probe,
.disconnect = carl9170_usb_disconnect,
.id_table = carl9170_usb_ids,
.soft_unbind = 1,
#ifdef CONFIG_PM
.suspend = carl9170_usb_suspend,
.resume = carl9170_usb_resume,
.reset_resume = carl9170_usb_resume,
#endif /* CONFIG_PM */
.disable_hub_initiated_lpm = 1,
};
module_usb_driver(carl9170_driver);
|
linux-master
|
drivers/net/wireless/ath/carl9170/usb.c
|
/*
* Atheros CARL9170 driver
*
* MAC programming
*
* Copyright 2008, Johannes Berg <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, see
* http://www.gnu.org/licenses/.
*
* This file incorporates work covered by the following copyright and
* permission notice:
* Copyright (c) 2007-2008 Atheros Communications, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <asm/unaligned.h>
#include "carl9170.h"
#include "cmd.h"
int carl9170_set_dyn_sifs_ack(struct ar9170 *ar)
{
u32 val;
if (conf_is_ht40(&ar->hw->conf))
val = 0x010a;
else {
if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
val = 0x105;
else
val = 0x104;
}
return carl9170_write_reg(ar, AR9170_MAC_REG_DYNAMIC_SIFS_ACK, val);
}
int carl9170_set_rts_cts_rate(struct ar9170 *ar)
{
u32 rts_rate, cts_rate;
if (conf_is_ht(&ar->hw->conf)) {
/* 12 mbit OFDM */
rts_rate = 0x1da;
cts_rate = 0x10a;
} else {
if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) {
/* 11 mbit CCK */
rts_rate = 033;
cts_rate = 003;
} else {
/* 6 mbit OFDM */
rts_rate = 0x1bb;
cts_rate = 0x10b;
}
}
return carl9170_write_reg(ar, AR9170_MAC_REG_RTS_CTS_RATE,
rts_rate | (cts_rate) << 16);
}
int carl9170_set_slot_time(struct ar9170 *ar)
{
struct ieee80211_vif *vif;
u32 slottime = 20;
rcu_read_lock();
vif = carl9170_get_main_vif(ar);
if (!vif) {
rcu_read_unlock();
return 0;
}
if ((ar->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ) ||
vif->bss_conf.use_short_slot)
slottime = 9;
rcu_read_unlock();
return carl9170_write_reg(ar, AR9170_MAC_REG_SLOT_TIME,
slottime << 10);
}
int carl9170_set_mac_rates(struct ar9170 *ar)
{
struct ieee80211_vif *vif;
u32 basic, mandatory;
rcu_read_lock();
vif = carl9170_get_main_vif(ar);
if (!vif) {
rcu_read_unlock();
return 0;
}
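	/*
	 * basic_rates keeps the CCK rates in bits 0-3; the OFDM rates
	 * above them are shifted up by four, apparently to match the
	 * register's separate CCK/OFDM fields.
	 */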
basic = (vif->bss_conf.basic_rates & 0xf);
basic |= (vif->bss_conf.basic_rates & 0xff0) << 4;
rcu_read_unlock();
if (ar->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ)
mandatory = 0xff00; /* OFDM 6/9/12/18/24/36/48/54 */
else
mandatory = 0xff0f; /* OFDM (6/9../54) + CCK (1/2/5.5/11) */
carl9170_regwrite_begin(ar);
carl9170_regwrite(AR9170_MAC_REG_BASIC_RATE, basic);
carl9170_regwrite(AR9170_MAC_REG_MANDATORY_RATE, mandatory);
carl9170_regwrite_finish();
return carl9170_regwrite_result();
}
int carl9170_set_qos(struct ar9170 *ar)
{
carl9170_regwrite_begin(ar);
carl9170_regwrite(AR9170_MAC_REG_AC0_CW, ar->edcf[0].cw_min |
(ar->edcf[0].cw_max << 16));
carl9170_regwrite(AR9170_MAC_REG_AC1_CW, ar->edcf[1].cw_min |
(ar->edcf[1].cw_max << 16));
carl9170_regwrite(AR9170_MAC_REG_AC2_CW, ar->edcf[2].cw_min |
(ar->edcf[2].cw_max << 16));
carl9170_regwrite(AR9170_MAC_REG_AC3_CW, ar->edcf[3].cw_min |
(ar->edcf[3].cw_max << 16));
carl9170_regwrite(AR9170_MAC_REG_AC4_CW, ar->edcf[4].cw_min |
(ar->edcf[4].cw_max << 16));
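	/*
	 * Convert each AIFS number into microseconds, assuming a 9 us
	 * slot and a 10 us SIFS: AIFS = AIFSN * 9 + 10 (e.g. AIFSN = 2
	 * gives 28 us).
	 */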
carl9170_regwrite(AR9170_MAC_REG_AC2_AC1_AC0_AIFS,
((ar->edcf[0].aifs * 9 + 10)) |
((ar->edcf[1].aifs * 9 + 10) << 12) |
((ar->edcf[2].aifs * 9 + 10) << 24));
carl9170_regwrite(AR9170_MAC_REG_AC4_AC3_AC2_AIFS,
((ar->edcf[2].aifs * 9 + 10) >> 8) |
((ar->edcf[3].aifs * 9 + 10) << 4) |
((ar->edcf[4].aifs * 9 + 10) << 16));
carl9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP,
ar->edcf[0].txop | ar->edcf[1].txop << 16);
carl9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP,
ar->edcf[2].txop | ar->edcf[3].txop << 16 |
ar->edcf[4].txop << 24);
carl9170_regwrite_finish();
return carl9170_regwrite_result();
}
int carl9170_init_mac(struct ar9170 *ar)
{
carl9170_regwrite_begin(ar);
/* switch MAC to OTUS interface */
carl9170_regwrite(0x1c3600, 0x3);
carl9170_regwrite(AR9170_MAC_REG_ACK_EXTENSION, 0x40);
carl9170_regwrite(AR9170_MAC_REG_RETRY_MAX, 0x0);
carl9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER,
AR9170_MAC_FTF_MONITOR);
/* enable MMIC */
carl9170_regwrite(AR9170_MAC_REG_SNIFFER,
AR9170_MAC_SNIFFER_DEFAULTS);
carl9170_regwrite(AR9170_MAC_REG_RX_THRESHOLD, 0xc1f80);
carl9170_regwrite(AR9170_MAC_REG_RX_PE_DELAY, 0x70);
carl9170_regwrite(AR9170_MAC_REG_EIFS_AND_SIFS, 0xa144000);
carl9170_regwrite(AR9170_MAC_REG_SLOT_TIME, 9 << 10);
/* CF-END & CF-ACK rate => 24M OFDM */
carl9170_regwrite(AR9170_MAC_REG_TID_CFACK_CFEND_RATE, 0x59900000);
/* NAV protects ACK only (in TXOP) */
carl9170_regwrite(AR9170_MAC_REG_TXOP_DURATION, 0x201);
/* Set Beacon PHY CTRL's TPC to 0x7, TA1=1 */
/* OTUS set AM to 0x1 */
carl9170_regwrite(AR9170_MAC_REG_BCN_HT1, 0x8000170);
carl9170_regwrite(AR9170_MAC_REG_BACKOFF_PROTECT, 0x105);
/* Aggregation MAX number and timeout */
carl9170_regwrite(AR9170_MAC_REG_AMPDU_FACTOR, 0x8000a);
carl9170_regwrite(AR9170_MAC_REG_AMPDU_DENSITY, 0x140a07);
carl9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER,
AR9170_MAC_FTF_DEFAULTS);
carl9170_regwrite(AR9170_MAC_REG_RX_CONTROL,
AR9170_MAC_RX_CTRL_DEAGG |
AR9170_MAC_RX_CTRL_SHORT_FILTER);
/* rate sets */
carl9170_regwrite(AR9170_MAC_REG_BASIC_RATE, 0x150f);
carl9170_regwrite(AR9170_MAC_REG_MANDATORY_RATE, 0x150f);
carl9170_regwrite(AR9170_MAC_REG_RTS_CTS_RATE, 0x0030033);
/* MIMO response control */
carl9170_regwrite(AR9170_MAC_REG_ACK_TPC, 0x4003c1e);
carl9170_regwrite(AR9170_MAC_REG_AMPDU_RX_THRESH, 0xffff);
/* set PHY register read timeout (??) */
carl9170_regwrite(AR9170_MAC_REG_MISC_680, 0xf00008);
/* Disable Rx TimeOut, workaround for BB. */
carl9170_regwrite(AR9170_MAC_REG_RX_TIMEOUT, 0x0);
/* Set WLAN DMA interrupt mode: generate int per packet */
carl9170_regwrite(AR9170_MAC_REG_TXRX_MPI, 0x110011);
carl9170_regwrite(AR9170_MAC_REG_FCS_SELECT,
AR9170_MAC_FCS_FIFO_PROT);
/* Disables the CF_END frame, undocumented register */
carl9170_regwrite(AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND,
0x141e0f48);
/* reset group hash table */
carl9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_L, 0xffffffff);
carl9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_H, 0xffffffff);
/* disable PRETBTT interrupt */
carl9170_regwrite(AR9170_MAC_REG_PRETBTT, 0x0);
carl9170_regwrite(AR9170_MAC_REG_BCN_PERIOD, 0x0);
carl9170_regwrite_finish();
return carl9170_regwrite_result();
}
static int carl9170_set_mac_reg(struct ar9170 *ar,
const u32 reg, const u8 *mac)
{
static const u8 zero[ETH_ALEN] = { 0 };
if (!mac)
mac = zero;
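	/* a 6-byte address is split over a 32-bit and a 16-bit register */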
carl9170_regwrite_begin(ar);
carl9170_regwrite(reg, get_unaligned_le32(mac));
carl9170_regwrite(reg + 4, get_unaligned_le16(mac + 4));
carl9170_regwrite_finish();
return carl9170_regwrite_result();
}
int carl9170_mod_virtual_mac(struct ar9170 *ar, const unsigned int id,
const u8 *mac)
{
if (WARN_ON(id >= ar->fw.vif_num))
return -EINVAL;
return carl9170_set_mac_reg(ar,
AR9170_MAC_REG_ACK_TABLE + (id - 1) * 8, mac);
}
int carl9170_update_multicast(struct ar9170 *ar, const u64 mc_hash)
{
int err;
carl9170_regwrite_begin(ar);
carl9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_H, mc_hash >> 32);
carl9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_L, mc_hash);
carl9170_regwrite_finish();
err = carl9170_regwrite_result();
if (err)
return err;
ar->cur_mc_hash = mc_hash;
return 0;
}
int carl9170_set_operating_mode(struct ar9170 *ar)
{
struct ieee80211_vif *vif;
struct ath_common *common = &ar->common;
u8 *mac_addr, *bssid;
u32 cam_mode = AR9170_MAC_CAM_DEFAULTS;
u32 enc_mode = AR9170_MAC_ENCRYPTION_DEFAULTS |
AR9170_MAC_ENCRYPTION_MGMT_RX_SOFTWARE;
u32 rx_ctrl = AR9170_MAC_RX_CTRL_DEAGG |
AR9170_MAC_RX_CTRL_SHORT_FILTER;
u32 sniffer = AR9170_MAC_SNIFFER_DEFAULTS;
int err = 0;
rcu_read_lock();
vif = carl9170_get_main_vif(ar);
if (vif) {
mac_addr = common->macaddr;
bssid = common->curbssid;
switch (vif->type) {
case NL80211_IFTYPE_ADHOC:
cam_mode |= AR9170_MAC_CAM_IBSS;
break;
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
cam_mode |= AR9170_MAC_CAM_AP;
/* iwlagn 802.11n STA Workaround */
rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
break;
case NL80211_IFTYPE_STATION:
cam_mode |= AR9170_MAC_CAM_STA;
rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
break;
default:
WARN(1, "Unsupported operation mode %x\n", vif->type);
err = -EOPNOTSUPP;
break;
}
} else {
/*
* Enable monitor mode
*
* rx_ctrl |= AR9170_MAC_RX_CTRL_ACK_IN_SNIFFER;
* sniffer |= AR9170_MAC_SNIFFER_ENABLE_PROMISC;
*
* When the hardware is in SNIFFER_PROMISC mode,
* it generates spurious ACKs for every incoming
* frame. This confuses every peer in the
* vicinity and the network throughput will suffer
* badly.
*
* Hence, the hardware will be put into station
* mode and just the rx filters are disabled.
*/
cam_mode |= AR9170_MAC_CAM_STA;
rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
mac_addr = common->macaddr;
bssid = NULL;
}
rcu_read_unlock();
if (err)
return err;
if (ar->rx_software_decryption)
enc_mode |= AR9170_MAC_ENCRYPTION_RX_SOFTWARE;
	if (ar->sniffer_enabled)
		enc_mode |= AR9170_MAC_ENCRYPTION_RX_SOFTWARE;
err = carl9170_set_mac_reg(ar, AR9170_MAC_REG_MAC_ADDR_L, mac_addr);
if (err)
return err;
err = carl9170_set_mac_reg(ar, AR9170_MAC_REG_BSSID_L, bssid);
if (err)
return err;
carl9170_regwrite_begin(ar);
carl9170_regwrite(AR9170_MAC_REG_SNIFFER, sniffer);
carl9170_regwrite(AR9170_MAC_REG_CAM_MODE, cam_mode);
carl9170_regwrite(AR9170_MAC_REG_ENCRYPTION, enc_mode);
carl9170_regwrite(AR9170_MAC_REG_RX_CONTROL, rx_ctrl);
carl9170_regwrite_finish();
return carl9170_regwrite_result();
}
int carl9170_set_hwretry_limit(struct ar9170 *ar, const unsigned int max_retry)
{
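	/*
	 * Multiplying by 0x11111 replicates the retry count into five
	 * 4-bit register fields (presumably one per rate-fallback stage),
	 * capped at three retries each (0x33333).
	 */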
u32 tmp = min_t(u32, 0x33333, max_retry * 0x11111);
return carl9170_write_reg(ar, AR9170_MAC_REG_RETRY_MAX, tmp);
}
int carl9170_set_beacon_timers(struct ar9170 *ar)
{
struct ieee80211_vif *vif;
u32 v = 0;
u32 pretbtt = 0;
rcu_read_lock();
vif = carl9170_get_main_vif(ar);
if (vif) {
struct carl9170_vif_info *mvif;
mvif = (void *) vif->drv_priv;
if (mvif->enable_beacon && !WARN_ON(!ar->beacon_enabled)) {
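			/*
			 * Presumably staggered beaconing: split the beacon
			 * interval evenly among all beaconing interfaces.
			 */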
ar->global_beacon_int = vif->bss_conf.beacon_int /
ar->beacon_enabled;
SET_VAL(AR9170_MAC_BCN_DTIM, v,
vif->bss_conf.dtim_period);
switch (vif->type) {
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_ADHOC:
v |= AR9170_MAC_BCN_IBSS_MODE;
break;
case NL80211_IFTYPE_AP:
v |= AR9170_MAC_BCN_AP_MODE;
break;
default:
WARN_ON_ONCE(1);
break;
}
} else if (vif->type == NL80211_IFTYPE_STATION) {
ar->global_beacon_int = vif->bss_conf.beacon_int;
SET_VAL(AR9170_MAC_BCN_DTIM, v,
ar->hw->conf.ps_dtim_period);
v |= AR9170_MAC_BCN_STA_PS |
AR9170_MAC_BCN_PWR_MGT;
}
if (ar->global_beacon_int) {
if (ar->global_beacon_int < 15) {
rcu_read_unlock();
return -ERANGE;
}
ar->global_pretbtt = ar->global_beacon_int -
CARL9170_PRETBTT_KUS;
} else {
ar->global_pretbtt = 0;
}
} else {
ar->global_beacon_int = 0;
ar->global_pretbtt = 0;
}
rcu_read_unlock();
SET_VAL(AR9170_MAC_BCN_PERIOD, v, ar->global_beacon_int);
SET_VAL(AR9170_MAC_PRETBTT, pretbtt, ar->global_pretbtt);
SET_VAL(AR9170_MAC_PRETBTT2, pretbtt, ar->global_pretbtt);
carl9170_regwrite_begin(ar);
carl9170_regwrite(AR9170_MAC_REG_PRETBTT, pretbtt);
carl9170_regwrite(AR9170_MAC_REG_BCN_PERIOD, v);
carl9170_regwrite_finish();
return carl9170_regwrite_result();
}
int carl9170_upload_key(struct ar9170 *ar, const u8 id, const u8 *mac,
const u8 ktype, const u8 keyidx, const u8 *keydata,
const int keylen)
{
struct carl9170_set_key_cmd key = { };
static const u8 bcast[ETH_ALEN] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
mac = mac ? : bcast;
key.user = cpu_to_le16(id);
key.keyId = cpu_to_le16(keyidx);
key.type = cpu_to_le16(ktype);
memcpy(&key.macAddr, mac, ETH_ALEN);
if (keydata)
memcpy(&key.key, keydata, keylen);
return carl9170_exec_cmd(ar, CARL9170_CMD_EKEY,
sizeof(key), (u8 *)&key, 0, NULL);
}
int carl9170_disable_key(struct ar9170 *ar, const u8 id)
{
struct carl9170_disable_key_cmd key = { };
key.user = cpu_to_le16(id);
return carl9170_exec_cmd(ar, CARL9170_CMD_DKEY,
sizeof(key), (u8 *)&key, 0, NULL);
}
int carl9170_set_mac_tpc(struct ar9170 *ar, struct ieee80211_channel *channel)
{
unsigned int power, chains;
if (ar->eeprom.tx_mask != 1)
chains = AR9170_TX_PHY_TXCHAIN_2;
else
chains = AR9170_TX_PHY_TXCHAIN_1;
switch (channel->band) {
case NL80211_BAND_2GHZ:
power = ar->power_2G_ofdm[0] & 0x3f;
break;
case NL80211_BAND_5GHZ:
power = ar->power_5G_leg[0] & 0x3f;
break;
default:
BUG();
}
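	/*
	 * conf.power_level is in dBm while the EEPROM power values are
	 * (presumably) in 0.5 dBm steps, hence the doubling.
	 */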
power = min_t(unsigned int, power, ar->hw->conf.power_level * 2);
carl9170_regwrite_begin(ar);
carl9170_regwrite(AR9170_MAC_REG_ACK_TPC,
0x3c1e | power << 20 | chains << 26);
carl9170_regwrite(AR9170_MAC_REG_RTS_CTS_TPC,
power << 5 | chains << 11 |
power << 21 | chains << 27);
carl9170_regwrite(AR9170_MAC_REG_CFEND_QOSNULL_TPC,
power << 5 | chains << 11 |
power << 21 | chains << 27);
carl9170_regwrite_finish();
return carl9170_regwrite_result();
}
|
linux-master
|
drivers/net/wireless/ath/carl9170/mac.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Atheros CARL9170 driver
*
* firmware parser
*
* Copyright 2009, 2010, Christian Lamparter <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/crc32.h>
#include <linux/module.h>
#include "carl9170.h"
#include "fwcmd.h"
#include "version.h"
static const u8 otus_magic[4] = { OTUS_MAGIC };
static const void *carl9170_fw_find_desc(struct ar9170 *ar, const u8 descid[4],
const unsigned int len, const u8 compatible_revision)
{
const struct carl9170fw_desc_head *iter;
carl9170fw_for_each_hdr(iter, ar->fw.desc) {
if (carl9170fw_desc_cmp(iter, descid, len,
compatible_revision))
return (void *)iter;
}
/* needed to find the LAST desc */
if (carl9170fw_desc_cmp(iter, descid, len,
compatible_revision))
return (void *)iter;
return NULL;
}
static int carl9170_fw_verify_descs(struct ar9170 *ar,
const struct carl9170fw_desc_head *head, unsigned int max_len)
{
const struct carl9170fw_desc_head *pos;
unsigned long pos_addr, end_addr;
unsigned int pos_length;
if (max_len < sizeof(*pos))
return -ENODATA;
max_len = min_t(unsigned int, CARL9170FW_DESC_MAX_LENGTH, max_len);
pos = head;
pos_addr = (unsigned long) pos;
end_addr = pos_addr + max_len;
while (pos_addr < end_addr) {
if (pos_addr + sizeof(*head) > end_addr)
return -E2BIG;
pos_length = le16_to_cpu(pos->length);
if (pos_length < sizeof(*head))
return -EBADMSG;
if (pos_length > max_len)
return -EOVERFLOW;
if (pos_addr + pos_length > end_addr)
return -EMSGSIZE;
if (carl9170fw_desc_cmp(pos, LAST_MAGIC,
CARL9170FW_LAST_DESC_SIZE,
CARL9170FW_LAST_DESC_CUR_VER))
return 0;
pos_addr += pos_length;
pos = (void *)pos_addr;
max_len -= pos_length;
}
return -EINVAL;
}
static void carl9170_fw_info(struct ar9170 *ar)
{
const struct carl9170fw_motd_desc *motd_desc;
unsigned int str_ver_len;
u32 fw_date;
dev_info(&ar->udev->dev, "driver API: %s 2%03d-%02d-%02d [%d-%d]\n",
CARL9170FW_VERSION_GIT, CARL9170FW_VERSION_YEAR,
CARL9170FW_VERSION_MONTH, CARL9170FW_VERSION_DAY,
CARL9170FW_API_MIN_VER, CARL9170FW_API_MAX_VER);
motd_desc = carl9170_fw_find_desc(ar, MOTD_MAGIC,
sizeof(*motd_desc), CARL9170FW_MOTD_DESC_CUR_VER);
if (motd_desc) {
str_ver_len = strnlen(motd_desc->release,
CARL9170FW_MOTD_RELEASE_LEN);
fw_date = le32_to_cpu(motd_desc->fw_year_month_day);
dev_info(&ar->udev->dev, "firmware API: %.*s 2%03d-%02d-%02d\n",
str_ver_len, motd_desc->release,
CARL9170FW_GET_YEAR(fw_date),
CARL9170FW_GET_MONTH(fw_date),
CARL9170FW_GET_DAY(fw_date));
strscpy(ar->hw->wiphy->fw_version, motd_desc->release,
sizeof(ar->hw->wiphy->fw_version));
}
}
static bool valid_dma_addr(const u32 address)
{
if (address >= AR9170_SRAM_OFFSET &&
address < (AR9170_SRAM_OFFSET + AR9170_SRAM_SIZE))
return true;
return false;
}
static bool valid_cpu_addr(const u32 address)
{
if (valid_dma_addr(address) || (address >= AR9170_PRAM_OFFSET &&
address < (AR9170_PRAM_OFFSET + AR9170_PRAM_SIZE)))
return true;
return false;
}
static int carl9170_fw_checksum(struct ar9170 *ar, const __u8 *data,
size_t len)
{
const struct carl9170fw_otus_desc *otus_desc;
const struct carl9170fw_last_desc *last_desc;
const struct carl9170fw_chk_desc *chk_desc;
unsigned long fin, diff;
unsigned int dsc_len;
u32 crc32;
last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC,
sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER);
if (!last_desc)
return -EINVAL;
otus_desc = carl9170_fw_find_desc(ar, OTUS_MAGIC,
sizeof(*otus_desc), CARL9170FW_OTUS_DESC_CUR_VER);
if (!otus_desc) {
dev_err(&ar->udev->dev, "failed to find compatible firmware "
"descriptor.\n");
return -ENODATA;
}
chk_desc = carl9170_fw_find_desc(ar, CHK_MAGIC,
sizeof(*chk_desc), CARL9170FW_CHK_DESC_CUR_VER);
if (!chk_desc) {
dev_warn(&ar->udev->dev, "Unprotected firmware image.\n");
return 0;
}
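	/*
	 * Presumed layout: fw_crc32 covers the firmware blob itself,
	 * while hdr_crc32 chains on over the descriptor area from the
	 * OTUS descriptor up to (but not including) the CHK descriptor.
	 */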
dsc_len = min_t(unsigned int, len,
(unsigned long)chk_desc - (unsigned long)otus_desc);
fin = (unsigned long) last_desc + sizeof(*last_desc);
diff = fin - (unsigned long) otus_desc;
if (diff < len)
len -= diff;
if (len < 256)
return -EIO;
crc32 = crc32_le(~0, data, len);
if (cpu_to_le32(crc32) != chk_desc->fw_crc32) {
dev_err(&ar->udev->dev, "fw checksum test failed.\n");
return -ENOEXEC;
}
crc32 = crc32_le(crc32, (void *)otus_desc, dsc_len);
if (cpu_to_le32(crc32) != chk_desc->hdr_crc32) {
dev_err(&ar->udev->dev, "descriptor check failed.\n");
return -EINVAL;
}
return 0;
}
static int carl9170_fw_tx_sequence(struct ar9170 *ar)
{
const struct carl9170fw_txsq_desc *txsq_desc;
txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC, sizeof(*txsq_desc),
CARL9170FW_TXSQ_DESC_CUR_VER);
if (txsq_desc) {
ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr);
if (!valid_cpu_addr(ar->fw.tx_seq_table))
return -EINVAL;
} else {
ar->fw.tx_seq_table = 0;
}
return 0;
}
static void carl9170_fw_set_if_combinations(struct ar9170 *ar,
u16 if_comb_types)
{
if (ar->fw.vif_num < 2)
return;
ar->if_comb_limits[0].max = ar->fw.vif_num;
ar->if_comb_limits[0].types = if_comb_types;
ar->if_combs[0].num_different_channels = 1;
ar->if_combs[0].max_interfaces = ar->fw.vif_num;
ar->if_combs[0].limits = ar->if_comb_limits;
ar->if_combs[0].n_limits = ARRAY_SIZE(ar->if_comb_limits);
ar->hw->wiphy->iface_combinations = ar->if_combs;
ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ar->if_combs);
}
static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
{
const struct carl9170fw_otus_desc *otus_desc;
int err;
u16 if_comb_types;
err = carl9170_fw_checksum(ar, data, len);
if (err)
return err;
otus_desc = carl9170_fw_find_desc(ar, OTUS_MAGIC,
sizeof(*otus_desc), CARL9170FW_OTUS_DESC_CUR_VER);
	if (!otus_desc)
		return -ENODATA;
#define SUPP(feat) \
(carl9170fw_supports(otus_desc->feature_set, feat))
if (!SUPP(CARL9170FW_DUMMY_FEATURE)) {
dev_err(&ar->udev->dev, "invalid firmware descriptor "
"format detected.\n");
return -EINVAL;
}
ar->fw.api_version = otus_desc->api_ver;
if (ar->fw.api_version < CARL9170FW_API_MIN_VER ||
ar->fw.api_version > CARL9170FW_API_MAX_VER) {
dev_err(&ar->udev->dev, "unsupported firmware api version.\n");
return -EINVAL;
}
if (!SUPP(CARL9170FW_COMMAND_PHY) || SUPP(CARL9170FW_UNUSABLE) ||
!SUPP(CARL9170FW_HANDLE_BACK_REQ)) {
dev_err(&ar->udev->dev, "firmware does support "
"mandatory features.\n");
return -ECANCELED;
}
if (ilog2(le32_to_cpu(otus_desc->feature_set)) >=
__CARL9170FW_FEATURE_NUM) {
dev_warn(&ar->udev->dev, "driver does not support all "
"firmware features.\n");
}
if (!SUPP(CARL9170FW_COMMAND_CAM)) {
dev_info(&ar->udev->dev, "crypto offloading is disabled "
"by firmware.\n");
ar->fw.disable_offload_fw = true;
}
if (SUPP(CARL9170FW_PSM) && SUPP(CARL9170FW_FIXED_5GHZ_PSM))
ieee80211_hw_set(ar->hw, SUPPORTS_PS);
if (!SUPP(CARL9170FW_USB_INIT_FIRMWARE)) {
dev_err(&ar->udev->dev, "firmware does not provide "
"mandatory interfaces.\n");
return -EINVAL;
}
if (SUPP(CARL9170FW_MINIBOOT))
ar->fw.offset = le16_to_cpu(otus_desc->miniboot_size);
else
ar->fw.offset = 0;
if (SUPP(CARL9170FW_USB_DOWN_STREAM)) {
ar->hw->extra_tx_headroom += sizeof(struct ar9170_stream);
ar->fw.tx_stream = true;
}
if (SUPP(CARL9170FW_USB_UP_STREAM))
ar->fw.rx_stream = true;
if (SUPP(CARL9170FW_RX_FILTER)) {
ar->fw.rx_filter = true;
ar->rx_filter_caps = FIF_FCSFAIL | FIF_PLCPFAIL |
FIF_CONTROL | FIF_PSPOLL | FIF_OTHER_BSS;
}
if (SUPP(CARL9170FW_HW_COUNTERS))
ar->fw.hw_counters = true;
if (SUPP(CARL9170FW_WOL))
device_set_wakeup_enable(&ar->udev->dev, true);
if (SUPP(CARL9170FW_RX_BA_FILTER))
ar->fw.ba_filter = true;
if_comb_types = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_P2P_CLIENT);
ar->fw.vif_num = otus_desc->vif_num;
ar->fw.cmd_bufs = otus_desc->cmd_bufs;
ar->fw.address = le32_to_cpu(otus_desc->fw_address);
ar->fw.rx_size = le16_to_cpu(otus_desc->rx_max_frame_len);
ar->fw.mem_blocks = min_t(unsigned int, otus_desc->tx_descs, 0xfe);
atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
ar->fw.mem_block_size = le16_to_cpu(otus_desc->tx_frag_len);
if (ar->fw.vif_num >= AR9170_MAX_VIRTUAL_MAC || !ar->fw.vif_num ||
ar->fw.mem_blocks < 16 || !ar->fw.cmd_bufs ||
ar->fw.mem_block_size < 64 || ar->fw.mem_block_size > 512 ||
ar->fw.rx_size > 32768 || ar->fw.rx_size < 4096 ||
!valid_cpu_addr(ar->fw.address)) {
dev_err(&ar->udev->dev, "firmware shows obvious signs of "
"malicious tampering.\n");
return -EINVAL;
}
ar->fw.beacon_addr = le32_to_cpu(otus_desc->bcn_addr);
ar->fw.beacon_max_len = le16_to_cpu(otus_desc->bcn_len);
if (valid_dma_addr(ar->fw.beacon_addr) && ar->fw.beacon_max_len >=
AR9170_MAC_BCN_LENGTH_MAX) {
ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
if (SUPP(CARL9170FW_WLANTX_CAB)) {
if_comb_types |= BIT(NL80211_IFTYPE_AP);
#ifdef CONFIG_MAC80211_MESH
if_comb_types |=
BIT(NL80211_IFTYPE_MESH_POINT);
#endif /* CONFIG_MAC80211_MESH */
}
}
carl9170_fw_set_if_combinations(ar, if_comb_types);
ar->hw->wiphy->interface_modes |= if_comb_types;
ar->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
/* As IBSS Encryption is software-based, IBSS RSN is supported. */
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_IBSS_RSN | WIPHY_FLAG_SUPPORTS_TDLS;
#undef SUPP
return carl9170_fw_tx_sequence(ar);
}
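/* A sketch of the feature-set checks performed above: every capability is one
 * bit in a 32-bit mask, a feature test is a bit test, and the position of the
 * highest set bit reveals whether the firmware advertises features newer than
 * the driver knows. Bit assignments below are illustrative, not the real
 * CARL9170FW_* enumeration.
 */
#include <stdint.h>
#include <stdio.h>

enum { FEAT_DUMMY, FEAT_COMMAND_PHY, FEAT_HANDLE_BACK_REQ, FEAT_NUM };

#define DEMO_SUPP(set, feat) (!!((set) & (1u << (feat))))

static int highest_bit(uint32_t v) /* ilog2() for non-zero v */
{
	int n = -1;

	while (v) {
		v >>= 1;
		n++;
	}
	return n;
}

int main(void)
{
	uint32_t feature_set = (1u << FEAT_DUMMY) | (1u << FEAT_COMMAND_PHY) |
			       (1u << FEAT_HANDLE_BACK_REQ) | (1u << 9);

	if (!DEMO_SUPP(feature_set, FEAT_COMMAND_PHY))
		printf("missing mandatory feature\n");
	if (highest_bit(feature_set) >= FEAT_NUM)
		printf("firmware has features this driver does not know\n");
	return 0;
}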
static struct carl9170fw_desc_head *
carl9170_find_fw_desc(struct ar9170 *ar, const __u8 *fw_data, const size_t len)
{
int scan = 0, found = 0;
if (!carl9170fw_size_check(len)) {
dev_err(&ar->udev->dev, "firmware size is out of bound.\n");
return NULL;
}
while (scan < len - sizeof(struct carl9170fw_desc_head)) {
if (fw_data[scan++] == otus_magic[found])
found++;
else
found = 0;
if (scan >= len)
break;
if (found == sizeof(otus_magic))
break;
}
if (found != sizeof(otus_magic))
return NULL;
return (void *)&fw_data[scan - found];
}
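/* A standalone sketch of the byte-by-byte magic scan above: walk the image
 * once, counting consecutive matches against the magic string, and return the
 * offset where the complete magic begins. "OTAR" stands in for otus_magic
 * here. Note that, like the driver's loop, a mismatch resets the match
 * counter to zero, so magics with repeated prefixes could be skipped.
 */
#include <stddef.h>
#include <stdio.h>

static long find_magic(const unsigned char *data, size_t len,
		       const char *magic, size_t magic_len)
{
	size_t scan = 0, found = 0;

	while (scan < len) {
		if (data[scan++] == (unsigned char)magic[found])
			found++;
		else
			found = 0;
		if (found == magic_len)
			return (long)(scan - found); /* start of the magic */
	}
	return -1;
}

int main(void)
{
	const unsigned char img[] = "xxxxOTARdescriptor-payload";

	printf("magic at offset %ld\n",
	       find_magic(img, sizeof(img) - 1, "OTAR", 4));
	return 0;
}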
int carl9170_parse_firmware(struct ar9170 *ar)
{
const struct carl9170fw_desc_head *fw_desc = NULL;
const struct firmware *fw = ar->fw.fw;
unsigned long header_offset = 0;
int err;
if (WARN_ON(!fw))
return -EINVAL;
fw_desc = carl9170_find_fw_desc(ar, fw->data, fw->size);
if (!fw_desc) {
dev_err(&ar->udev->dev, "unsupported firmware.\n");
return -ENODATA;
}
header_offset = (unsigned long)fw_desc - (unsigned long)fw->data;
err = carl9170_fw_verify_descs(ar, fw_desc, fw->size - header_offset);
if (err) {
dev_err(&ar->udev->dev, "damaged firmware (%d).\n", err);
return err;
}
ar->fw.desc = fw_desc;
carl9170_fw_info(ar);
err = carl9170_fw(ar, fw->data, fw->size);
if (err) {
dev_err(&ar->udev->dev, "failed to parse firmware (%d).\n",
err);
return err;
}
return 0;
}
|
linux-master
|
drivers/net/wireless/ath/carl9170/fw.c
|
/*
* Atheros CARL9170 driver
*
* Basic HW register/memory/command access functions
*
* Copyright 2008, Johannes Berg <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, see
* http://www.gnu.org/licenses/.
*
* This file incorporates work covered by the following copyright and
* permission notice:
* Copyright (c) 2007-2008 Atheros Communications, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <asm/div64.h>
#include "carl9170.h"
#include "cmd.h"
int carl9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val)
{
const __le32 buf[2] = {
cpu_to_le32(reg),
cpu_to_le32(val),
};
int err;
err = carl9170_exec_cmd(ar, CARL9170_CMD_WREG, sizeof(buf),
(u8 *) buf, 0, NULL);
if (err) {
if (net_ratelimit()) {
wiphy_err(ar->hw->wiphy, "writing reg %#x "
"(val %#x) failed (%d)\n", reg, val, err);
}
}
return err;
}
int carl9170_read_mreg(struct ar9170 *ar, const int nregs,
const u32 *regs, u32 *out)
{
int i, err;
__le32 *offs, *res;
/* abuse "out" for the register offsets, must be same length */
offs = (__le32 *)out;
for (i = 0; i < nregs; i++)
offs[i] = cpu_to_le32(regs[i]);
/* also use the same buffer for the input */
res = (__le32 *)out;
err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
4 * nregs, (u8 *)offs,
4 * nregs, (u8 *)res);
if (err) {
if (net_ratelimit()) {
wiphy_err(ar->hw->wiphy, "reading regs failed (%d)\n",
err);
}
return err;
}
/* convert result to cpu endian */
for (i = 0; i < nregs; i++)
out[i] = le32_to_cpu(res[i]);
return 0;
}
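/* A userspace sketch of the in-place buffer reuse above: the caller's u32
 * output array doubles as the request buffer, which works because request
 * offsets and response values are both 4 bytes per register. The driver
 * additionally byte-swaps to and from little-endian around the transfer;
 * fake_exec_cmd() below stands in for carl9170_exec_cmd().
 */
#include <stdint.h>
#include <stdio.h>

static void fake_exec_cmd(const uint32_t *req, uint32_t *res, int nregs)
{
	for (int i = 0; i < nregs; i++)
		res[i] = req[i] | 0x100; /* pretend each reg reads back reg|0x100 */
}

static void demo_read_mreg(int nregs, const uint32_t *regs, uint32_t *out)
{
	/* reuse "out": first as the offset list, then for the results */
	for (int i = 0; i < nregs; i++)
		out[i] = regs[i];
	fake_exec_cmd(out, out, nregs);
}

int main(void)
{
	const uint32_t regs[2] = { 0x1c3534, 0x1c3538 };
	uint32_t vals[2];

	demo_read_mreg(2, regs, vals);
	printf("%#x -> %#x\n%#x -> %#x\n", regs[0], vals[0], regs[1], vals[1]);
	return 0;
}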
int carl9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val)
{
return carl9170_read_mreg(ar, 1, &reg, val);
}
int carl9170_echo_test(struct ar9170 *ar, const u32 v)
{
u32 echores;
int err;
err = carl9170_exec_cmd(ar, CARL9170_CMD_ECHO,
4, (u8 *)&v,
4, (u8 *)&echores);
if (err)
return err;
if (v != echores) {
wiphy_info(ar->hw->wiphy, "wrong echo %x != %x", v, echores);
return -EINVAL;
}
return 0;
}
struct carl9170_cmd *carl9170_cmd_buf(struct ar9170 *ar,
const enum carl9170_cmd_oids cmd, const unsigned int len)
{
struct carl9170_cmd *tmp;
tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
if (tmp) {
tmp->hdr.cmd = cmd;
tmp->hdr.len = len;
}
return tmp;
}
int carl9170_reboot(struct ar9170 *ar)
{
struct carl9170_cmd *cmd;
int err;
cmd = carl9170_cmd_buf(ar, CARL9170_CMD_REBOOT_ASYNC, 0);
if (!cmd)
return -ENOMEM;
err = __carl9170_exec_cmd(ar, cmd, true);
return err;
}
int carl9170_mac_reset(struct ar9170 *ar)
{
return carl9170_exec_cmd(ar, CARL9170_CMD_SWRST,
0, NULL, 0, NULL);
}
int carl9170_bcn_ctrl(struct ar9170 *ar, const unsigned int vif_id,
const u32 mode, const u32 addr, const u32 len)
{
struct carl9170_cmd *cmd;
cmd = carl9170_cmd_buf(ar, CARL9170_CMD_BCN_CTRL_ASYNC,
sizeof(struct carl9170_bcn_ctrl_cmd));
if (!cmd)
return -ENOMEM;
cmd->bcn_ctrl.vif_id = cpu_to_le32(vif_id);
cmd->bcn_ctrl.mode = cpu_to_le32(mode);
cmd->bcn_ctrl.bcn_addr = cpu_to_le32(addr);
cmd->bcn_ctrl.bcn_len = cpu_to_le32(len);
return __carl9170_exec_cmd(ar, cmd, true);
}
int carl9170_collect_tally(struct ar9170 *ar)
{
struct carl9170_tally_rsp tally;
struct survey_info *info;
unsigned int tick;
int err;
err = carl9170_exec_cmd(ar, CARL9170_CMD_TALLY, 0, NULL,
sizeof(tally), (u8 *)&tally);
if (err)
return err;
tick = le32_to_cpu(tally.tick);
if (tick) {
ar->tally.active += le32_to_cpu(tally.active) / tick;
ar->tally.cca += le32_to_cpu(tally.cca) / tick;
ar->tally.tx_time += le32_to_cpu(tally.tx_time) / tick;
ar->tally.rx_total += le32_to_cpu(tally.rx_total);
ar->tally.rx_overrun += le32_to_cpu(tally.rx_overrun);
if (ar->channel) {
info = &ar->survey[ar->channel->hw_value];
info->time = ar->tally.active;
info->time_busy = ar->tally.cca;
info->time_tx = ar->tally.tx_time;
do_div(info->time, 1000);
do_div(info->time_busy, 1000);
do_div(info->time_tx, 1000);
}
}
return 0;
}
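/* A sketch of the tally arithmetic above: the raw airtime counters are
 * divided by the tick count to normalize the sample window, accumulated into
 * running totals, and finally scaled from microseconds to milliseconds for
 * the survey (the do_div() calls). All numbers below are invented.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tick = 40;            /* ticks in this sample window */
	uint32_t active_raw = 4000000; /* raw "channel active" counter */
	uint64_t active_total = 0;     /* running total, microseconds */

	if (tick)
		active_total += active_raw / tick;

	/* what do_div(info->time, 1000) does: us -> ms */
	printf("survey active time: %llu ms\n",
	       (unsigned long long)(active_total / 1000));
	return 0;
}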
int carl9170_powersave(struct ar9170 *ar, const bool ps)
{
struct carl9170_cmd *cmd;
u32 state;
cmd = carl9170_cmd_buf(ar, CARL9170_CMD_PSM_ASYNC,
sizeof(struct carl9170_psm));
if (!cmd)
return -ENOMEM;
if (ps) {
/* Sleep until next TBTT */
state = CARL9170_PSM_SLEEP | 1;
} else {
/* wake up immediately */
state = 1;
}
cmd->psm.state = cpu_to_le32(state);
return __carl9170_exec_cmd(ar, cmd, true);
}
|
linux-master
|
drivers/net/wireless/ath/carl9170/cmd.c
|
/*
* Atheros CARL9170 driver
*
* debug(fs) probing
*
* Copyright 2008, Johannes Berg <[email protected]>
* Copyright 2009, 2010, Christian Lamparter <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, see
* http://www.gnu.org/licenses/.
*
* This file incorporates work covered by the following copyright and
* permission notice:
* Copyright (c) 2008-2009 Atheros Communications, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include "carl9170.h"
#include "cmd.h"
#define ADD(buf, off, max, fmt, args...) \
off += scnprintf(&buf[off], max - off, fmt, ##args)
struct carl9170_debugfs_fops {
unsigned int read_bufsize;
umode_t attr;
char *(*read)(struct ar9170 *ar, char *buf, size_t bufsize,
ssize_t *len);
ssize_t (*write)(struct ar9170 *aru, const char *buf, size_t size);
const struct file_operations fops;
enum carl9170_device_state req_dev_state;
};
static ssize_t carl9170_debugfs_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct carl9170_debugfs_fops *dfops;
struct ar9170 *ar;
char *buf = NULL, *res_buf = NULL;
ssize_t ret = 0;
int err = 0;
if (!count)
return 0;
ar = file->private_data;
if (!ar)
return -ENODEV;
dfops = container_of(debugfs_real_fops(file),
struct carl9170_debugfs_fops, fops);
if (!dfops->read)
return -ENOSYS;
if (dfops->read_bufsize) {
buf = vmalloc(dfops->read_bufsize);
if (!buf)
return -ENOMEM;
}
mutex_lock(&ar->mutex);
if (!CHK_DEV_STATE(ar, dfops->req_dev_state)) {
err = -ENODEV;
res_buf = buf;
goto out_free;
}
res_buf = dfops->read(ar, buf, dfops->read_bufsize, &ret);
if (ret > 0)
err = simple_read_from_buffer(userbuf, count, ppos,
res_buf, ret);
else
err = ret;
WARN_ON_ONCE(dfops->read_bufsize && (res_buf != buf));
out_free:
vfree(res_buf);
mutex_unlock(&ar->mutex);
return err;
}
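/* A standalone sketch of the container_of() pattern used above: the shared
 * read handler recovers the driver-specific wrapper from a pointer to its
 * embedded file-operations member, so a single handler can serve every
 * debugfs file. Plain C, no kernel headers; the struct names are invented.
 */
#include <stddef.h>
#include <stdio.h>

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_fops { int dummy; };

struct demo_dbgfs_file {
	const char *name;
	struct demo_fops fops; /* embedded, like carl9170_debugfs_fops.fops */
};

static void demo_handler(struct demo_fops *fops)
{
	struct demo_dbgfs_file *df =
		demo_container_of(fops, struct demo_dbgfs_file, fops);

	printf("serving debugfs file: %s\n", df->name);
}

int main(void)
{
	struct demo_dbgfs_file f = { .name = "mem_usage" };

	demo_handler(&f.fops);
	return 0;
}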
static ssize_t carl9170_debugfs_write(struct file *file,
const char __user *userbuf, size_t count, loff_t *ppos)
{
struct carl9170_debugfs_fops *dfops;
struct ar9170 *ar;
char *buf = NULL;
int err = 0;
if (!count)
return 0;
if (count > PAGE_SIZE)
return -E2BIG;
ar = file->private_data;
if (!ar)
return -ENODEV;
dfops = container_of(debugfs_real_fops(file),
struct carl9170_debugfs_fops, fops);
if (!dfops->write)
return -ENOSYS;
buf = vmalloc(count);
if (!buf)
return -ENOMEM;
if (copy_from_user(buf, userbuf, count)) {
err = -EFAULT;
goto out_free;
}
if (mutex_trylock(&ar->mutex) == 0) {
err = -EAGAIN;
goto out_free;
}
if (!CHK_DEV_STATE(ar, dfops->req_dev_state)) {
err = -ENODEV;
goto out_unlock;
}
err = dfops->write(ar, buf, count);
out_unlock:
mutex_unlock(&ar->mutex);
out_free:
vfree(buf);
return err;
}
#define __DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, \
_attr, _dstate) \
static const struct carl9170_debugfs_fops carl_debugfs_##name ##_ops = {\
.read_bufsize = _read_bufsize, \
.read = _read, \
.write = _write, \
.attr = _attr, \
.req_dev_state = _dstate, \
.fops = { \
.open = simple_open, \
.read = carl9170_debugfs_read, \
.write = carl9170_debugfs_write, \
.owner = THIS_MODULE \
}, \
}
#define DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, _attr) \
__DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, \
_attr, CARL9170_STARTED) \
#define DEBUGFS_DECLARE_RO_FILE(name, _read_bufsize) \
DEBUGFS_DECLARE_FILE(name, carl9170_debugfs_##name ##_read, \
NULL, _read_bufsize, 0400)
#define DEBUGFS_DECLARE_WO_FILE(name) \
DEBUGFS_DECLARE_FILE(name, NULL, carl9170_debugfs_##name ##_write,\
0, 0200)
#define DEBUGFS_DECLARE_RW_FILE(name, _read_bufsize) \
DEBUGFS_DECLARE_FILE(name, carl9170_debugfs_##name ##_read, \
carl9170_debugfs_##name ##_write, \
_read_bufsize, 0600)
#define __DEBUGFS_DECLARE_RW_FILE(name, _read_bufsize, _dstate) \
__DEBUGFS_DECLARE_FILE(name, carl9170_debugfs_##name ##_read, \
carl9170_debugfs_##name ##_write, \
_read_bufsize, 0600, _dstate)
#define DEBUGFS_READONLY_FILE(name, _read_bufsize, fmt, value...) \
static char *carl9170_debugfs_ ##name ## _read(struct ar9170 *ar, \
char *buf, size_t buf_size,\
ssize_t *len) \
{ \
ADD(buf, *len, buf_size, fmt "\n", ##value); \
return buf; \
} \
DEBUGFS_DECLARE_RO_FILE(name, _read_bufsize)
static char *carl9170_debugfs_mem_usage_read(struct ar9170 *ar, char *buf,
size_t bufsize, ssize_t *len)
{
spin_lock_bh(&ar->mem_lock);
ADD(buf, *len, bufsize, "jar: [%*pb]\n",
ar->fw.mem_blocks, ar->mem_bitmap);
ADD(buf, *len, bufsize, "cookies: used:%3d / total:%3d, allocs:%d\n",
bitmap_weight(ar->mem_bitmap, ar->fw.mem_blocks),
ar->fw.mem_blocks, atomic_read(&ar->mem_allocs));
ADD(buf, *len, bufsize, "memory: free:%3d (%3d KiB) / total:%3d KiB)\n",
atomic_read(&ar->mem_free_blocks),
(atomic_read(&ar->mem_free_blocks) * ar->fw.mem_block_size) / 1024,
(ar->fw.mem_blocks * ar->fw.mem_block_size) / 1024);
spin_unlock_bh(&ar->mem_lock);
return buf;
}
DEBUGFS_DECLARE_RO_FILE(mem_usage, 512);
static char *carl9170_debugfs_qos_stat_read(struct ar9170 *ar, char *buf,
size_t bufsize, ssize_t *len)
{
ADD(buf, *len, bufsize, "%s QoS AC\n", modparam_noht ? "Hardware" :
"Software");
ADD(buf, *len, bufsize, "[ VO VI "
" BE BK ]\n");
spin_lock_bh(&ar->tx_stats_lock);
ADD(buf, *len, bufsize, "[length/limit length/limit "
"length/limit length/limit ]\n"
"[ %3d/%3d %3d/%3d "
" %3d/%3d %3d/%3d ]\n\n",
ar->tx_stats[0].len, ar->tx_stats[0].limit,
ar->tx_stats[1].len, ar->tx_stats[1].limit,
ar->tx_stats[2].len, ar->tx_stats[2].limit,
ar->tx_stats[3].len, ar->tx_stats[3].limit);
ADD(buf, *len, bufsize, "[ total total "
" total total ]\n"
"[%10d %10d %10d %10d ]\n\n",
ar->tx_stats[0].count, ar->tx_stats[1].count,
ar->tx_stats[2].count, ar->tx_stats[3].count);
spin_unlock_bh(&ar->tx_stats_lock);
ADD(buf, *len, bufsize, "[ pend/waittx pend/waittx "
" pend/waittx pend/waittx]\n"
"[ %3d/%3d %3d/%3d "
" %3d/%3d %3d/%3d ]\n\n",
skb_queue_len(&ar->tx_pending[0]),
skb_queue_len(&ar->tx_status[0]),
skb_queue_len(&ar->tx_pending[1]),
skb_queue_len(&ar->tx_status[1]),
skb_queue_len(&ar->tx_pending[2]),
skb_queue_len(&ar->tx_status[2]),
skb_queue_len(&ar->tx_pending[3]),
skb_queue_len(&ar->tx_status[3]));
return buf;
}
DEBUGFS_DECLARE_RO_FILE(qos_stat, 512);
static void carl9170_debugfs_format_frame(struct ar9170 *ar,
struct sk_buff *skb, const char *prefix, char *buf,
ssize_t *off, ssize_t bufsize)
{
struct _carl9170_tx_superframe *txc = (void *) skb->data;
struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
struct carl9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
struct ieee80211_hdr *hdr = (void *) txc->frame_data;
ADD(buf, *off, bufsize, "%s %p, c:%2x, DA:%pM, sq:%4d, mc:%.4x, "
"pc:%.8x, to:%d ms\n", prefix, skb, txc->s.cookie,
ieee80211_get_DA(hdr), get_seq_h(hdr),
le16_to_cpu(txc->f.mac_control), le32_to_cpu(txc->f.phy_control),
jiffies_to_msecs(jiffies - arinfo->timeout));
}
static char *carl9170_debugfs_ampdu_state_read(struct ar9170 *ar, char *buf,
size_t bufsize, ssize_t *len)
{
struct carl9170_sta_tid *iter;
struct sk_buff *skb;
int cnt = 0, fc;
int offset;
rcu_read_lock();
list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
spin_lock_bh(&iter->lock);
ADD(buf, *len, bufsize, "Entry: #%2d TID:%1d, BSN:%4d, "
"SNX:%4d, HSN:%4d, BAW:%2d, state:%1d, toggles:%d\n",
cnt, iter->tid, iter->bsn, iter->snx, iter->hsn,
iter->max, iter->state, iter->counter);
ADD(buf, *len, bufsize, "\tWindow: [%*pb,W]\n",
CARL9170_BAW_BITS, iter->bitmap);
#define BM_STR_OFF(offset) \
((CARL9170_BAW_BITS - (offset) - 1) / 4 + \
(CARL9170_BAW_BITS - (offset) - 1) / 32 + 1)
offset = BM_STR_OFF(0);
ADD(buf, *len, bufsize, "\tBase Seq: %*s\n", offset, "T");
offset = BM_STR_OFF(SEQ_DIFF(iter->snx, iter->bsn));
ADD(buf, *len, bufsize, "\tNext Seq: %*s\n", offset, "W");
offset = BM_STR_OFF(((int)iter->hsn - (int)iter->bsn) %
CARL9170_BAW_BITS);
ADD(buf, *len, bufsize, "\tLast Seq: %*s\n", offset, "N");
ADD(buf, *len, bufsize, "\tPre-Aggregation reorder buffer: "
" currently queued:%d\n", skb_queue_len(&iter->queue));
fc = 0;
skb_queue_walk(&iter->queue, skb) {
char prefix[32];
snprintf(prefix, sizeof(prefix), "\t\t%3d :", fc);
carl9170_debugfs_format_frame(ar, skb, prefix, buf,
len, bufsize);
fc++;
}
spin_unlock_bh(&iter->lock);
cnt++;
}
rcu_read_unlock();
return buf;
}
DEBUGFS_DECLARE_RO_FILE(ampdu_state, 8000);
static void carl9170_debugfs_queue_dump(struct ar9170 *ar, char *buf,
ssize_t *len, size_t bufsize, struct sk_buff_head *queue)
{
struct sk_buff *skb;
char prefix[16];
int fc = 0;
spin_lock_bh(&queue->lock);
skb_queue_walk(queue, skb) {
snprintf(prefix, sizeof(prefix), "%3d :", fc);
carl9170_debugfs_format_frame(ar, skb, prefix, buf,
len, bufsize);
fc++;
}
spin_unlock_bh(&queue->lock);
}
#define DEBUGFS_QUEUE_DUMP(q, qi) \
static char *carl9170_debugfs_##q ##_##qi ##_read(struct ar9170 *ar, \
char *buf, size_t bufsize, ssize_t *len) \
{ \
carl9170_debugfs_queue_dump(ar, buf, len, bufsize, &ar->q[qi]); \
return buf; \
} \
DEBUGFS_DECLARE_RO_FILE(q##_##qi, 8000);
static char *carl9170_debugfs_sta_psm_read(struct ar9170 *ar, char *buf,
size_t bufsize, ssize_t *len)
{
ADD(buf, *len, bufsize, "psm state: %s\n", (ar->ps.off_override ?
"FORCE CAM" : (ar->ps.state ? "PSM" : "CAM")));
ADD(buf, *len, bufsize, "sleep duration: %d ms.\n", ar->ps.sleep_ms);
ADD(buf, *len, bufsize, "last power-state transition: %d ms ago.\n",
jiffies_to_msecs(jiffies - ar->ps.last_action));
ADD(buf, *len, bufsize, "last CAM->PSM transition: %d ms ago.\n",
jiffies_to_msecs(jiffies - ar->ps.last_slept));
return buf;
}
DEBUGFS_DECLARE_RO_FILE(sta_psm, 160);
static char *carl9170_debugfs_tx_stuck_read(struct ar9170 *ar, char *buf,
size_t bufsize, ssize_t *len)
{
int i;
for (i = 0; i < ar->hw->queues; i++) {
ADD(buf, *len, bufsize, "TX queue [%d]: %10d max:%10d ms.\n",
i, ieee80211_queue_stopped(ar->hw, i) ?
jiffies_to_msecs(jiffies - ar->queue_stop_timeout[i]) : 0,
jiffies_to_msecs(ar->max_queue_stop_timeout[i]));
ar->max_queue_stop_timeout[i] = 0;
}
return buf;
}
DEBUGFS_DECLARE_RO_FILE(tx_stuck, 180);
static char *carl9170_debugfs_phy_noise_read(struct ar9170 *ar, char *buf,
size_t bufsize, ssize_t *len)
{
int err;
err = carl9170_get_noisefloor(ar);
if (err) {
*len = err;
return buf;
}
ADD(buf, *len, bufsize, "Chain 0: %10d dBm, ext. chan.:%10d dBm\n",
ar->noise[0], ar->noise[2]);
ADD(buf, *len, bufsize, "Chain 2: %10d dBm, ext. chan.:%10d dBm\n",
ar->noise[1], ar->noise[3]);
return buf;
}
DEBUGFS_DECLARE_RO_FILE(phy_noise, 180);
static char *carl9170_debugfs_vif_dump_read(struct ar9170 *ar, char *buf,
size_t bufsize, ssize_t *len)
{
struct carl9170_vif_info *iter;
int i = 0;
ADD(buf, *len, bufsize, "registered VIFs:%d \\ %d\n",
ar->vifs, ar->fw.vif_num);
ADD(buf, *len, bufsize, "VIF bitmap: [%*pb]\n",
ar->fw.vif_num, &ar->vif_bitmap);
rcu_read_lock();
list_for_each_entry_rcu(iter, &ar->vif_list, list) {
struct ieee80211_vif *vif = carl9170_get_vif(iter);
ADD(buf, *len, bufsize, "\t%d = [%s VIF, id:%d, type:%x "
" mac:%pM %s]\n", i, (carl9170_get_main_vif(ar) == vif ?
"Master" : " Slave"), iter->id, vif->type, vif->addr,
iter->enable_beacon ? "beaconing " : "");
i++;
}
rcu_read_unlock();
return buf;
}
DEBUGFS_DECLARE_RO_FILE(vif_dump, 8000);
#define UPDATE_COUNTER(ar, name) ({ \
u32 __tmp[ARRAY_SIZE(name##_regs)]; \
unsigned int __i, __err = -ENODEV; \
\
for (__i = 0; __i < ARRAY_SIZE(name##_regs); __i++) { \
__tmp[__i] = name##_regs[__i].reg; \
ar->debug.stats.name##_counter[__i] = 0; \
} \
\
if (IS_STARTED(ar)) \
__err = carl9170_read_mreg(ar, ARRAY_SIZE(name##_regs), \
__tmp, ar->debug.stats.name##_counter); \
(__err); })
#define TALLY_SUM_UP(ar, name) do { \
unsigned int __i; \
\
for (__i = 0; __i < ARRAY_SIZE(name##_regs); __i++) { \
ar->debug.stats.name##_sum[__i] += \
ar->debug.stats.name##_counter[__i]; \
} \
} while (0)
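/* A plain-C sketch of the snapshot-and-accumulate pattern behind
 * UPDATE_COUNTER/TALLY_SUM_UP: each readout replaces the per-interval
 * counters (the macro zeroes them first in case the read fails) and a second
 * pass folds the snapshot into long-running sums. read_hw_counters() is a
 * stand-in for the multi-register read.
 */
#include <stdint.h>
#include <stdio.h>

#define NCOUNTERS 3

static uint32_t counter[NCOUNTERS]; /* latest interval */
static uint64_t sum[NCOUNTERS];     /* running totals */

static void read_hw_counters(uint32_t *out)
{
	for (int i = 0; i < NCOUNTERS; i++)
		out[i] = 100u * (i + 1); /* fake hardware values */
}

int main(void)
{
	for (int pass = 0; pass < 2; pass++) {
		read_hw_counters(counter);
		for (int i = 0; i < NCOUNTERS; i++)
			sum[i] += counter[i];
	}
	for (int i = 0; i < NCOUNTERS; i++)
		printf("counter %d: now %u, total %llu\n", i, counter[i],
		       (unsigned long long)sum[i]);
	return 0;
}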
#define DEBUGFS_HW_TALLY_FILE(name, f) \
static char *carl9170_debugfs_##name ## _read(struct ar9170 *ar, \
char *dum, size_t bufsize, ssize_t *ret) \
{ \
char *buf; \
int i, max_len, err; \
\
max_len = ARRAY_SIZE(name##_regs) * 80; \
buf = vmalloc(max_len); \
if (!buf) \
return NULL; \
\
err = UPDATE_COUNTER(ar, name); \
if (err) { \
*ret = err; \
return buf; \
} \
\
TALLY_SUM_UP(ar, name); \
\
for (i = 0; i < ARRAY_SIZE(name##_regs); i++) { \
ADD(buf, *ret, max_len, "%22s = %" f "[+%" f "]\n", \
name##_regs[i].nreg, ar->debug.stats.name ##_sum[i],\
ar->debug.stats.name ##_counter[i]); \
} \
\
return buf; \
} \
DEBUGFS_DECLARE_RO_FILE(name, 0);
#define DEBUGFS_HW_REG_FILE(name, f) \
static char *carl9170_debugfs_##name ## _read(struct ar9170 *ar, \
char *dum, size_t bufsize, ssize_t *ret) \
{ \
char *buf; \
int i, max_len, err; \
\
max_len = ARRAY_SIZE(name##_regs) * 80; \
buf = vmalloc(max_len); \
if (!buf) \
return NULL; \
\
err = UPDATE_COUNTER(ar, name); \
if (err) { \
*ret = err; \
return buf; \
} \
\
for (i = 0; i < ARRAY_SIZE(name##_regs); i++) { \
ADD(buf, *ret, max_len, "%22s = %" f "\n", \
name##_regs[i].nreg, \
ar->debug.stats.name##_counter[i]); \
} \
\
return buf; \
} \
DEBUGFS_DECLARE_RO_FILE(name, 0);
static ssize_t carl9170_debugfs_hw_ioread32_write(struct ar9170 *ar,
const char *buf, size_t count)
{
int err = 0, i, n = 0, max_len = 32, res;
unsigned int reg, tmp;
if (!count)
return 0;
if (count > max_len)
return -E2BIG;
res = sscanf(buf, "0x%X %d", ®, &n);
if (res < 1) {
err = -EINVAL;
goto out;
}
if (res == 1)
n = 1;
if (n > 15) {
err = -EMSGSIZE;
goto out;
}
if ((reg >= 0x280000) || ((reg + (n << 2)) >= 0x280000)) {
err = -EADDRNOTAVAIL;
goto out;
}
if (reg & 3) {
err = -EINVAL;
goto out;
}
for (i = 0; i < n; i++) {
err = carl9170_read_reg(ar, reg + (i << 2), &tmp);
if (err)
goto out;
ar->debug.ring[ar->debug.ring_tail].reg = reg + (i << 2);
ar->debug.ring[ar->debug.ring_tail].value = tmp;
ar->debug.ring_tail++;
ar->debug.ring_tail %= CARL9170_DEBUG_RING_SIZE;
}
out:
return err ? err : count;
}
static char *carl9170_debugfs_hw_ioread32_read(struct ar9170 *ar, char *buf,
size_t bufsize, ssize_t *ret)
{
int i = 0;
while (ar->debug.ring_head != ar->debug.ring_tail) {
ADD(buf, *ret, bufsize, "%.8x = %.8x\n",
ar->debug.ring[ar->debug.ring_head].reg,
ar->debug.ring[ar->debug.ring_head].value);
ar->debug.ring_head++;
ar->debug.ring_head %= CARL9170_DEBUG_RING_SIZE;
if (i++ == 64)
break;
}
ar->debug.ring_head = ar->debug.ring_tail;
return buf;
}
DEBUGFS_DECLARE_RW_FILE(hw_ioread32, CARL9170_DEBUG_RING_SIZE * 40);
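/* A minimal sketch of the head/tail ring buffer behind hw_ioread32: the write
 * handler appends register/value pairs at the tail, the read handler drains
 * from the head, and both indices wrap with a modulo. RING_SIZE stands in
 * for CARL9170_DEBUG_RING_SIZE.
 */
#include <stdio.h>

#define RING_SIZE 8

static struct { unsigned int reg, value; } ring[RING_SIZE];
static unsigned int ring_head, ring_tail;

static void ring_push(unsigned int reg, unsigned int value)
{
	ring[ring_tail].reg = reg;
	ring[ring_tail].value = value;
	ring_tail = (ring_tail + 1) % RING_SIZE;
}

static void ring_drain(void)
{
	while (ring_head != ring_tail) {
		printf("%.8x = %.8x\n", ring[ring_head].reg,
		       ring[ring_head].value);
		ring_head = (ring_head + 1) % RING_SIZE;
	}
}

int main(void)
{
	ring_push(0x1c3534, 0xdeadbeef);
	ring_push(0x1c3538, 0x00c0ffee);
	ring_drain();
	return 0;
}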
static ssize_t carl9170_debugfs_bug_write(struct ar9170 *ar, const char *buf,
size_t count)
{
int err;
if (count < 1)
return -EINVAL;
switch (buf[0]) {
case 'F':
ar->needs_full_reset = true;
break;
case 'R':
if (!IS_STARTED(ar)) {
err = -EAGAIN;
goto out;
}
ar->needs_full_reset = false;
break;
case 'M':
err = carl9170_mac_reset(ar);
if (err < 0)
count = err;
goto out;
case 'P':
err = carl9170_set_channel(ar, ar->hw->conf.chandef.chan,
cfg80211_get_chandef_type(&ar->hw->conf.chandef));
if (err < 0)
count = err;
goto out;
default:
return -EINVAL;
}
carl9170_restart(ar, CARL9170_RR_USER_REQUEST);
out:
return count;
}
static char *carl9170_debugfs_bug_read(struct ar9170 *ar, char *buf,
size_t bufsize, ssize_t *ret)
{
ADD(buf, *ret, bufsize, "[P]hy reinit, [R]estart, [F]ull usb reset, "
"[M]ac reset\n");
ADD(buf, *ret, bufsize, "firmware restarts:%d, last reason:%d\n",
ar->restart_counter, ar->last_reason);
ADD(buf, *ret, bufsize, "phy reinit errors:%d (%d)\n",
ar->total_chan_fail, ar->chan_fail);
ADD(buf, *ret, bufsize, "reported firmware errors:%d\n",
ar->fw.err_counter);
ADD(buf, *ret, bufsize, "reported firmware BUGs:%d\n",
ar->fw.bug_counter);
ADD(buf, *ret, bufsize, "pending restart requests:%d\n",
atomic_read(&ar->pending_restarts));
return buf;
}
__DEBUGFS_DECLARE_RW_FILE(bug, 400, CARL9170_STOPPED);
static const char *const erp_modes[] = {
[CARL9170_ERP_INVALID] = "INVALID",
[CARL9170_ERP_AUTO] = "Automatic",
[CARL9170_ERP_MAC80211] = "Set by MAC80211",
[CARL9170_ERP_OFF] = "Force Off",
[CARL9170_ERP_RTS] = "Force RTS",
[CARL9170_ERP_CTS] = "Force CTS"
};
static char *carl9170_debugfs_erp_read(struct ar9170 *ar, char *buf,
size_t bufsize, ssize_t *ret)
{
ADD(buf, *ret, bufsize, "ERP Setting: (%d) -> %s\n", ar->erp_mode,
erp_modes[ar->erp_mode]);
return buf;
}
static ssize_t carl9170_debugfs_erp_write(struct ar9170 *ar, const char *buf,
size_t count)
{
int res, val;
if (count < 1)
return -EINVAL;
res = sscanf(buf, "%d", &val);
if (res != 1)
return -EINVAL;
if (val <= CARL9170_ERP_INVALID || val >= __CARL9170_ERP_NUM)
return -EINVAL;
ar->erp_mode = val;
return count;
}
DEBUGFS_DECLARE_RW_FILE(erp, 80);
static ssize_t carl9170_debugfs_hw_iowrite32_write(struct ar9170 *ar,
const char *buf, size_t count)
{
int err = 0, max_len = 22, res;
u32 reg, val;
if (!count)
return 0;
if (count > max_len)
return -E2BIG;
res = sscanf(buf, "0x%X 0x%X", ®, &val);
if (res != 2) {
err = -EINVAL;
goto out;
}
if (reg <= 0x100000 || reg >= 0x280000) {
err = -EADDRNOTAVAIL;
goto out;
}
if (reg & 3) {
err = -EINVAL;
goto out;
}
err = carl9170_write_reg(ar, reg, val);
if (err)
goto out;
out:
return err ? err : count;
}
DEBUGFS_DECLARE_WO_FILE(hw_iowrite32);
DEBUGFS_HW_TALLY_FILE(hw_tx_tally, "u");
DEBUGFS_HW_TALLY_FILE(hw_rx_tally, "u");
DEBUGFS_HW_TALLY_FILE(hw_phy_errors, "u");
DEBUGFS_HW_REG_FILE(hw_wlan_queue, ".8x");
DEBUGFS_HW_REG_FILE(hw_pta_queue, ".8x");
DEBUGFS_HW_REG_FILE(hw_ampdu_info, ".8x");
DEBUGFS_QUEUE_DUMP(tx_status, 0);
DEBUGFS_QUEUE_DUMP(tx_status, 1);
DEBUGFS_QUEUE_DUMP(tx_status, 2);
DEBUGFS_QUEUE_DUMP(tx_status, 3);
DEBUGFS_QUEUE_DUMP(tx_pending, 0);
DEBUGFS_QUEUE_DUMP(tx_pending, 1);
DEBUGFS_QUEUE_DUMP(tx_pending, 2);
DEBUGFS_QUEUE_DUMP(tx_pending, 3);
DEBUGFS_READONLY_FILE(usb_tx_anch_urbs, 20, "%d",
atomic_read(&ar->tx_anch_urbs));
DEBUGFS_READONLY_FILE(usb_rx_anch_urbs, 20, "%d",
atomic_read(&ar->rx_anch_urbs));
DEBUGFS_READONLY_FILE(usb_rx_work_urbs, 20, "%d",
atomic_read(&ar->rx_work_urbs));
DEBUGFS_READONLY_FILE(usb_rx_pool_urbs, 20, "%d",
atomic_read(&ar->rx_pool_urbs));
DEBUGFS_READONLY_FILE(tx_total_queued, 20, "%d",
atomic_read(&ar->tx_total_queued));
DEBUGFS_READONLY_FILE(tx_ampdu_scheduler, 20, "%d",
atomic_read(&ar->tx_ampdu_scheduler));
DEBUGFS_READONLY_FILE(tx_total_pending, 20, "%d",
atomic_read(&ar->tx_total_pending));
DEBUGFS_READONLY_FILE(tx_ampdu_list_len, 20, "%d",
ar->tx_ampdu_list_len);
DEBUGFS_READONLY_FILE(tx_ampdu_upload, 20, "%d",
atomic_read(&ar->tx_ampdu_upload));
DEBUGFS_READONLY_FILE(tx_janitor_last_run, 64, "last run:%d ms ago",
jiffies_to_msecs(jiffies - ar->tx_janitor_last_run));
DEBUGFS_READONLY_FILE(tx_dropped, 20, "%d", ar->tx_dropped);
DEBUGFS_READONLY_FILE(rx_dropped, 20, "%d", ar->rx_dropped);
DEBUGFS_READONLY_FILE(sniffer_enabled, 20, "%d", ar->sniffer_enabled);
DEBUGFS_READONLY_FILE(rx_software_decryption, 20, "%d",
ar->rx_software_decryption);
DEBUGFS_READONLY_FILE(ampdu_factor, 20, "%d",
ar->current_factor);
DEBUGFS_READONLY_FILE(ampdu_density, 20, "%d",
ar->current_density);
DEBUGFS_READONLY_FILE(beacon_int, 20, "%d TU", ar->global_beacon_int);
DEBUGFS_READONLY_FILE(pretbtt, 20, "%d TU", ar->global_pretbtt);
void carl9170_debugfs_register(struct ar9170 *ar)
{
ar->debug_dir = debugfs_create_dir(KBUILD_MODNAME,
ar->hw->wiphy->debugfsdir);
#define DEBUGFS_ADD(name) \
debugfs_create_file(#name, carl_debugfs_##name ##_ops.attr, \
ar->debug_dir, ar, \
&carl_debugfs_##name ## _ops.fops)
DEBUGFS_ADD(usb_tx_anch_urbs);
DEBUGFS_ADD(usb_rx_pool_urbs);
DEBUGFS_ADD(usb_rx_anch_urbs);
DEBUGFS_ADD(usb_rx_work_urbs);
DEBUGFS_ADD(tx_total_queued);
DEBUGFS_ADD(tx_total_pending);
DEBUGFS_ADD(tx_dropped);
DEBUGFS_ADD(tx_stuck);
DEBUGFS_ADD(tx_ampdu_upload);
DEBUGFS_ADD(tx_ampdu_scheduler);
DEBUGFS_ADD(tx_ampdu_list_len);
DEBUGFS_ADD(rx_dropped);
DEBUGFS_ADD(sniffer_enabled);
DEBUGFS_ADD(rx_software_decryption);
DEBUGFS_ADD(mem_usage);
DEBUGFS_ADD(qos_stat);
DEBUGFS_ADD(sta_psm);
DEBUGFS_ADD(ampdu_state);
DEBUGFS_ADD(hw_tx_tally);
DEBUGFS_ADD(hw_rx_tally);
DEBUGFS_ADD(hw_phy_errors);
DEBUGFS_ADD(phy_noise);
DEBUGFS_ADD(hw_wlan_queue);
DEBUGFS_ADD(hw_pta_queue);
DEBUGFS_ADD(hw_ampdu_info);
DEBUGFS_ADD(ampdu_density);
DEBUGFS_ADD(ampdu_factor);
DEBUGFS_ADD(tx_janitor_last_run);
DEBUGFS_ADD(tx_status_0);
DEBUGFS_ADD(tx_status_1);
DEBUGFS_ADD(tx_status_2);
DEBUGFS_ADD(tx_status_3);
DEBUGFS_ADD(tx_pending_0);
DEBUGFS_ADD(tx_pending_1);
DEBUGFS_ADD(tx_pending_2);
DEBUGFS_ADD(tx_pending_3);
DEBUGFS_ADD(hw_ioread32);
DEBUGFS_ADD(hw_iowrite32);
DEBUGFS_ADD(bug);
DEBUGFS_ADD(erp);
DEBUGFS_ADD(vif_dump);
DEBUGFS_ADD(beacon_int);
DEBUGFS_ADD(pretbtt);
#undef DEBUGFS_ADD
}
void carl9170_debugfs_unregister(struct ar9170 *ar)
{
debugfs_remove_recursive(ar->debug_dir);
}
|
linux-master
|
drivers/net/wireless/ath/carl9170/debug.c
|
/*
* Atheros CARL9170 driver
*
* PHY and RF code
*
* Copyright 2008, Johannes Berg <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, see
* http://www.gnu.org/licenses/.
*
* This file incorporates work covered by the following copyright and
* permission notice:
* Copyright (c) 2007-2008 Atheros Communications, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/bitrev.h>
#include "carl9170.h"
#include "cmd.h"
#include "phy.h"
static int carl9170_init_power_cal(struct ar9170 *ar)
{
carl9170_regwrite_begin(ar);
carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE_MAX, 0x7f);
carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE1, 0x3f3f3f3f);
carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE2, 0x3f3f3f3f);
carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE3, 0x3f3f3f3f);
carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE4, 0x3f3f3f3f);
carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE5, 0x3f3f3f3f);
carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE6, 0x3f3f3f3f);
carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE7, 0x3f3f3f3f);
carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE8, 0x3f3f3f3f);
carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE9, 0x3f3f3f3f);
carl9170_regwrite_finish();
return carl9170_regwrite_result();
}
struct carl9170_phy_init {
u32 reg, _5ghz_20, _5ghz_40, _2ghz_40, _2ghz_20;
};
static struct carl9170_phy_init ar5416_phy_init[] = {
{ 0x1c5800, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
{ 0x1c5804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, },
{ 0x1c5808, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c580c, 0xad848e19, 0xad848e19, 0xad848e19, 0xad848e19, },
{ 0x1c5810, 0x7d14e000, 0x7d14e000, 0x7d14e000, 0x7d14e000, },
{ 0x1c5814, 0x9c0a9f6b, 0x9c0a9f6b, 0x9c0a9f6b, 0x9c0a9f6b, },
{ 0x1c5818, 0x00000090, 0x00000090, 0x00000090, 0x00000090, },
{ 0x1c581c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, },
{ 0x1c5824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, },
{ 0x1c5828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, },
{ 0x1c582c, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, },
{ 0x1c5830, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, },
{ 0x1c5838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
{ 0x1c583c, 0x00200400, 0x00200400, 0x00200400, 0x00200400, },
{ 0x1c5840, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e, },
{ 0x1c5844, 0x1372161e, 0x13721c1e, 0x13721c24, 0x137216a4, },
{ 0x1c5848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, },
{ 0x1c584c, 0x1284233c, 0x1284233c, 0x1284233c, 0x1284233c, },
{ 0x1c5850, 0x6c48b4e4, 0x6d48b4e4, 0x6d48b0e4, 0x6c48b0e4, },
{ 0x1c5854, 0x00000859, 0x00000859, 0x00000859, 0x00000859, },
{ 0x1c5858, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, },
{ 0x1c585c, 0x31395c5e, 0x3139605e, 0x3139605e, 0x31395c5e, },
{ 0x1c5860, 0x0004dd10, 0x0004dd10, 0x0004dd20, 0x0004dd20, },
{ 0x1c5864, 0x0001c600, 0x0001c600, 0x0001c600, 0x0001c600, },
{ 0x1c5868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, },
{ 0x1c586c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, },
{ 0x1c5900, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5904, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5908, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c590c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, },
{ 0x1c5918, 0x00000118, 0x00000230, 0x00000268, 0x00000134, },
{ 0x1c591c, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, },
{ 0x1c5920, 0x0510081c, 0x0510081c, 0x0510001c, 0x0510001c, },
{ 0x1c5924, 0xd0058a15, 0xd0058a15, 0xd0058a15, 0xd0058a15, },
{ 0x1c5928, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
{ 0x1c592c, 0x00000004, 0x00000004, 0x00000004, 0x00000004, },
{ 0x1c5934, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
{ 0x1c5938, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
{ 0x1c593c, 0x0000007f, 0x0000007f, 0x0000007f, 0x0000007f, },
{ 0x1c5944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, },
{ 0x1c5948, 0x9280b212, 0x9280b212, 0x9280b212, 0x9280b212, },
{ 0x1c594c, 0x00020028, 0x00020028, 0x00020028, 0x00020028, },
{ 0x1c5954, 0x5d50e188, 0x5d50e188, 0x5d50e188, 0x5d50e188, },
{ 0x1c5958, 0x00081fff, 0x00081fff, 0x00081fff, 0x00081fff, },
{ 0x1c5960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
{ 0x1c5964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, },
{ 0x1c5970, 0x190fb515, 0x190fb515, 0x190fb515, 0x190fb515, },
{ 0x1c5974, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5978, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
{ 0x1c597c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5980, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5984, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5988, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c598c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5990, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5994, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5998, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c599c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c59a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c59a4, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
{ 0x1c59a8, 0x001fff00, 0x001fff00, 0x001fff00, 0x001fff00, },
{ 0x1c59ac, 0x006f00c4, 0x006f00c4, 0x006f00c4, 0x006f00c4, },
{ 0x1c59b0, 0x03051000, 0x03051000, 0x03051000, 0x03051000, },
{ 0x1c59b4, 0x00000820, 0x00000820, 0x00000820, 0x00000820, },
{ 0x1c59bc, 0x00181400, 0x00181400, 0x00181400, 0x00181400, },
{ 0x1c59c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, },
{ 0x1c59c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, },
{ 0x1c59c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, },
{ 0x1c59cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, },
{ 0x1c59d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, },
{ 0x1c59d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c59d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c59dc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c59e0, 0x00000200, 0x00000200, 0x00000200, 0x00000200, },
{ 0x1c59e4, 0x64646464, 0x64646464, 0x64646464, 0x64646464, },
{ 0x1c59e8, 0x3c787878, 0x3c787878, 0x3c787878, 0x3c787878, },
{ 0x1c59ec, 0x000000aa, 0x000000aa, 0x000000aa, 0x000000aa, },
{ 0x1c59f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c59fc, 0x00001042, 0x00001042, 0x00001042, 0x00001042, },
{ 0x1c5a00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5a04, 0x00000040, 0x00000040, 0x00000040, 0x00000040, },
{ 0x1c5a08, 0x00000080, 0x00000080, 0x00000080, 0x00000080, },
{ 0x1c5a0c, 0x000001a1, 0x000001a1, 0x00000141, 0x00000141, },
{ 0x1c5a10, 0x000001e1, 0x000001e1, 0x00000181, 0x00000181, },
{ 0x1c5a14, 0x00000021, 0x00000021, 0x000001c1, 0x000001c1, },
{ 0x1c5a18, 0x00000061, 0x00000061, 0x00000001, 0x00000001, },
{ 0x1c5a1c, 0x00000168, 0x00000168, 0x00000041, 0x00000041, },
{ 0x1c5a20, 0x000001a8, 0x000001a8, 0x000001a8, 0x000001a8, },
{ 0x1c5a24, 0x000001e8, 0x000001e8, 0x000001e8, 0x000001e8, },
{ 0x1c5a28, 0x00000028, 0x00000028, 0x00000028, 0x00000028, },
{ 0x1c5a2c, 0x00000068, 0x00000068, 0x00000068, 0x00000068, },
{ 0x1c5a30, 0x00000189, 0x00000189, 0x000000a8, 0x000000a8, },
{ 0x1c5a34, 0x000001c9, 0x000001c9, 0x00000169, 0x00000169, },
{ 0x1c5a38, 0x00000009, 0x00000009, 0x000001a9, 0x000001a9, },
{ 0x1c5a3c, 0x00000049, 0x00000049, 0x000001e9, 0x000001e9, },
{ 0x1c5a40, 0x00000089, 0x00000089, 0x00000029, 0x00000029, },
{ 0x1c5a44, 0x00000170, 0x00000170, 0x00000069, 0x00000069, },
{ 0x1c5a48, 0x000001b0, 0x000001b0, 0x00000190, 0x00000190, },
{ 0x1c5a4c, 0x000001f0, 0x000001f0, 0x000001d0, 0x000001d0, },
{ 0x1c5a50, 0x00000030, 0x00000030, 0x00000010, 0x00000010, },
{ 0x1c5a54, 0x00000070, 0x00000070, 0x00000050, 0x00000050, },
{ 0x1c5a58, 0x00000191, 0x00000191, 0x00000090, 0x00000090, },
{ 0x1c5a5c, 0x000001d1, 0x000001d1, 0x00000151, 0x00000151, },
{ 0x1c5a60, 0x00000011, 0x00000011, 0x00000191, 0x00000191, },
{ 0x1c5a64, 0x00000051, 0x00000051, 0x000001d1, 0x000001d1, },
{ 0x1c5a68, 0x00000091, 0x00000091, 0x00000011, 0x00000011, },
{ 0x1c5a6c, 0x000001b8, 0x000001b8, 0x00000051, 0x00000051, },
{ 0x1c5a70, 0x000001f8, 0x000001f8, 0x00000198, 0x00000198, },
{ 0x1c5a74, 0x00000038, 0x00000038, 0x000001d8, 0x000001d8, },
{ 0x1c5a78, 0x00000078, 0x00000078, 0x00000018, 0x00000018, },
{ 0x1c5a7c, 0x00000199, 0x00000199, 0x00000058, 0x00000058, },
{ 0x1c5a80, 0x000001d9, 0x000001d9, 0x00000098, 0x00000098, },
{ 0x1c5a84, 0x00000019, 0x00000019, 0x00000159, 0x00000159, },
{ 0x1c5a88, 0x00000059, 0x00000059, 0x00000199, 0x00000199, },
{ 0x1c5a8c, 0x00000099, 0x00000099, 0x000001d9, 0x000001d9, },
{ 0x1c5a90, 0x000000d9, 0x000000d9, 0x00000019, 0x00000019, },
{ 0x1c5a94, 0x000000f9, 0x000000f9, 0x00000059, 0x00000059, },
{ 0x1c5a98, 0x000000f9, 0x000000f9, 0x00000099, 0x00000099, },
{ 0x1c5a9c, 0x000000f9, 0x000000f9, 0x000000d9, 0x000000d9, },
{ 0x1c5aa0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5aa4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5aa8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5aac, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5ab0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5ab4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5ab8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5abc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5ac0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5ac4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5ac8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5acc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5ad0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5ad4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5ad8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5adc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5ae0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5ae4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5ae8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5aec, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5af0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5af4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5af8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5afc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
{ 0x1c5b00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5b04, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
{ 0x1c5b08, 0x00000002, 0x00000002, 0x00000002, 0x00000002, },
{ 0x1c5b0c, 0x00000003, 0x00000003, 0x00000003, 0x00000003, },
{ 0x1c5b10, 0x00000004, 0x00000004, 0x00000004, 0x00000004, },
{ 0x1c5b14, 0x00000005, 0x00000005, 0x00000005, 0x00000005, },
{ 0x1c5b18, 0x00000008, 0x00000008, 0x00000008, 0x00000008, },
{ 0x1c5b1c, 0x00000009, 0x00000009, 0x00000009, 0x00000009, },
{ 0x1c5b20, 0x0000000a, 0x0000000a, 0x0000000a, 0x0000000a, },
{ 0x1c5b24, 0x0000000b, 0x0000000b, 0x0000000b, 0x0000000b, },
{ 0x1c5b28, 0x0000000c, 0x0000000c, 0x0000000c, 0x0000000c, },
{ 0x1c5b2c, 0x0000000d, 0x0000000d, 0x0000000d, 0x0000000d, },
{ 0x1c5b30, 0x00000010, 0x00000010, 0x00000010, 0x00000010, },
{ 0x1c5b34, 0x00000011, 0x00000011, 0x00000011, 0x00000011, },
{ 0x1c5b38, 0x00000012, 0x00000012, 0x00000012, 0x00000012, },
{ 0x1c5b3c, 0x00000013, 0x00000013, 0x00000013, 0x00000013, },
{ 0x1c5b40, 0x00000014, 0x00000014, 0x00000014, 0x00000014, },
{ 0x1c5b44, 0x00000015, 0x00000015, 0x00000015, 0x00000015, },
{ 0x1c5b48, 0x00000018, 0x00000018, 0x00000018, 0x00000018, },
{ 0x1c5b4c, 0x00000019, 0x00000019, 0x00000019, 0x00000019, },
{ 0x1c5b50, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, },
{ 0x1c5b54, 0x0000001b, 0x0000001b, 0x0000001b, 0x0000001b, },
{ 0x1c5b58, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, },
{ 0x1c5b5c, 0x0000001d, 0x0000001d, 0x0000001d, 0x0000001d, },
{ 0x1c5b60, 0x00000020, 0x00000020, 0x00000020, 0x00000020, },
{ 0x1c5b64, 0x00000021, 0x00000021, 0x00000021, 0x00000021, },
{ 0x1c5b68, 0x00000022, 0x00000022, 0x00000022, 0x00000022, },
{ 0x1c5b6c, 0x00000023, 0x00000023, 0x00000023, 0x00000023, },
{ 0x1c5b70, 0x00000024, 0x00000024, 0x00000024, 0x00000024, },
{ 0x1c5b74, 0x00000025, 0x00000025, 0x00000025, 0x00000025, },
{ 0x1c5b78, 0x00000028, 0x00000028, 0x00000028, 0x00000028, },
{ 0x1c5b7c, 0x00000029, 0x00000029, 0x00000029, 0x00000029, },
{ 0x1c5b80, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a, },
{ 0x1c5b84, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b, },
{ 0x1c5b88, 0x0000002c, 0x0000002c, 0x0000002c, 0x0000002c, },
{ 0x1c5b8c, 0x0000002d, 0x0000002d, 0x0000002d, 0x0000002d, },
{ 0x1c5b90, 0x00000030, 0x00000030, 0x00000030, 0x00000030, },
{ 0x1c5b94, 0x00000031, 0x00000031, 0x00000031, 0x00000031, },
{ 0x1c5b98, 0x00000032, 0x00000032, 0x00000032, 0x00000032, },
{ 0x1c5b9c, 0x00000033, 0x00000033, 0x00000033, 0x00000033, },
{ 0x1c5ba0, 0x00000034, 0x00000034, 0x00000034, 0x00000034, },
{ 0x1c5ba4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
{ 0x1c5ba8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
{ 0x1c5bac, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
{ 0x1c5bb0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
{ 0x1c5bb4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
{ 0x1c5bb8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
{ 0x1c5bbc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
{ 0x1c5bc0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
{ 0x1c5bc4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
{ 0x1c5bc8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
{ 0x1c5bcc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
{ 0x1c5bd0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
{ 0x1c5bd4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
{ 0x1c5bd8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
{ 0x1c5bdc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
{ 0x1c5be0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
{ 0x1c5be4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
{ 0x1c5be8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
{ 0x1c5bec, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
{ 0x1c5bf0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
{ 0x1c5bf4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
{ 0x1c5bf8, 0x00000010, 0x00000010, 0x00000010, 0x00000010, },
{ 0x1c5bfc, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, },
{ 0x1c5c00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5c0c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5c10, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5c14, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5c18, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5c1c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5c20, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5c24, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5c28, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5c2c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5c30, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5c34, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5c38, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5c3c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5cf0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5cf4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5cf8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c5cfc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c6200, 0x00000008, 0x00000008, 0x0000000e, 0x0000000e, },
{ 0x1c6204, 0x00000440, 0x00000440, 0x00000440, 0x00000440, },
{ 0x1c6208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, },
{ 0x1c620c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
{ 0x1c6210, 0x40806333, 0x40806333, 0x40806333, 0x40806333, },
{ 0x1c6214, 0x00106c10, 0x00106c10, 0x00106c10, 0x00106c10, },
{ 0x1c6218, 0x009c4060, 0x009c4060, 0x009c4060, 0x009c4060, },
{ 0x1c621c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, },
{ 0x1c6220, 0x018830c6, 0x018830c6, 0x018830c6, 0x018830c6, },
{ 0x1c6224, 0x00000400, 0x00000400, 0x00000400, 0x00000400, },
{ 0x1c6228, 0x000009b5, 0x000009b5, 0x000009b5, 0x000009b5, },
{ 0x1c622c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c6230, 0x00000108, 0x00000210, 0x00000210, 0x00000108, },
{ 0x1c6234, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
{ 0x1c6238, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
{ 0x1c623c, 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af, },
{ 0x1c6240, 0x38490a20, 0x38490a20, 0x38490a20, 0x38490a20, },
{ 0x1c6244, 0x00007bb6, 0x00007bb6, 0x00007bb6, 0x00007bb6, },
{ 0x1c6248, 0x0fff3ffc, 0x0fff3ffc, 0x0fff3ffc, 0x0fff3ffc, },
{ 0x1c624c, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
{ 0x1c6250, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, },
{ 0x1c6254, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c6258, 0x0cc75380, 0x0cc75380, 0x0cc75380, 0x0cc75380, },
{ 0x1c625c, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, },
{ 0x1c6260, 0xdfa91f01, 0xdfa91f01, 0xdfa91f01, 0xdfa91f01, },
{ 0x1c6264, 0x00418a11, 0x00418a11, 0x00418a11, 0x00418a11, },
{ 0x1c6268, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c626c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
{ 0x1c6274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, },
{ 0x1c6278, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
{ 0x1c627c, 0x051701ce, 0x051701ce, 0x051701ce, 0x051701ce, },
{ 0x1c6300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, },
{ 0x1c6304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, },
{ 0x1c6308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, },
{ 0x1c630c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, },
{ 0x1c6310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, },
{ 0x1c6314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, },
{ 0x1c6318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, },
{ 0x1c631c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, },
{ 0x1c6320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, },
{ 0x1c6324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, },
{ 0x1c6328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, },
{ 0x1c632c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c6330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c6334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c6338, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c633c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c6340, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c6344, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c6348, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
{ 0x1c634c, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
{ 0x1c6350, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
{ 0x1c6354, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, },
{ 0x1c6358, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, },
{ 0x1c6388, 0x08000000, 0x08000000, 0x08000000, 0x08000000, },
{ 0x1c638c, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
{ 0x1c6390, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
{ 0x1c6394, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
{ 0x1c6398, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce, },
{ 0x1c639c, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
{ 0x1c63a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c63a4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c63a8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c63ac, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c63b0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c63b4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c63b8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c63bc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c63c0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c63c4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c63c8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c63cc, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
{ 0x1c63d0, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
{ 0x1c63d4, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
{ 0x1c63d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
{ 0x1c63dc, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
{ 0x1c63e0, 0x000000c0, 0x000000c0, 0x000000c0, 0x000000c0, },
{ 0x1c6848, 0x00180a65, 0x00180a65, 0x00180a68, 0x00180a68, },
{ 0x1c6920, 0x0510001c, 0x0510001c, 0x0510001c, 0x0510001c, },
{ 0x1c6960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
{ 0x1c720c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
{ 0x1c726c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
{ 0x1c7848, 0x00180a65, 0x00180a65, 0x00180a68, 0x00180a68, },
{ 0x1c7920, 0x0510001c, 0x0510001c, 0x0510001c, 0x0510001c, },
{ 0x1c7960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
{ 0x1c820c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
{ 0x1c826c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
/* { 0x1c8864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, }, */
{ 0x1c8864, 0x0001c600, 0x0001c600, 0x0001c600, 0x0001c600, },
{ 0x1c895c, 0x004b6a8e, 0x004b6a8e, 0x004b6a8e, 0x004b6a8e, },
{ 0x1c8968, 0x000003ce, 0x000003ce, 0x000003ce, 0x000003ce, },
{ 0x1c89bc, 0x00181400, 0x00181400, 0x00181400, 0x00181400, },
{ 0x1c9270, 0x00820820, 0x00820820, 0x00820820, 0x00820820, },
{ 0x1c935c, 0x066c420f, 0x066c420f, 0x066c420f, 0x066c420f, },
{ 0x1c9360, 0x0f282207, 0x0f282207, 0x0f282207, 0x0f282207, },
{ 0x1c9364, 0x17601685, 0x17601685, 0x17601685, 0x17601685, },
{ 0x1c9368, 0x1f801104, 0x1f801104, 0x1f801104, 0x1f801104, },
{ 0x1c936c, 0x37a00c03, 0x37a00c03, 0x37a00c03, 0x37a00c03, },
{ 0x1c9370, 0x3fc40883, 0x3fc40883, 0x3fc40883, 0x3fc40883, },
{ 0x1c9374, 0x57c00803, 0x57c00803, 0x57c00803, 0x57c00803, },
{ 0x1c9378, 0x5fd80682, 0x5fd80682, 0x5fd80682, 0x5fd80682, },
{ 0x1c937c, 0x7fe00482, 0x7fe00482, 0x7fe00482, 0x7fe00482, },
{ 0x1c9380, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, },
{ 0x1c9384, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, }
};
/*
* Look up a register in ar5416_phy_init[] and return its init value for
* the given band and bandwidth. Returns 0 if the register address is
* not found.
*/
static u32 carl9170_def_val(u32 reg, bool is_2ghz, bool is_40mhz)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ar5416_phy_init); i++) {
if (ar5416_phy_init[i].reg != reg)
continue;
if (is_2ghz) {
if (is_40mhz)
return ar5416_phy_init[i]._2ghz_40;
else
return ar5416_phy_init[i]._2ghz_20;
} else {
if (is_40mhz)
return ar5416_phy_init[i]._5ghz_40;
else
return ar5416_phy_init[i]._5ghz_20;
}
}
return 0;
}
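/* A standalone sketch of the per-band/per-bandwidth lookup above: each
 * register row carries four initialization values, and the band plus channel
 * width pick the column. The two rows below are copied from
 * ar5416_phy_init[]; everything else is scaffolding for illustration.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_phy_init {
	uint32_t reg, v5g20, v5g40, v2g40, v2g20;
};

static const struct demo_phy_init tbl[] = {
	{ 0x1c5844, 0x1372161e, 0x13721c1e, 0x13721c24, 0x137216a4 },
	{ 0x1c6230, 0x00000108, 0x00000210, 0x00000210, 0x00000108 },
};

static uint32_t demo_def_val(uint32_t reg, int is_2ghz, int is_40mhz)
{
	for (size_t i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
		if (tbl[i].reg != reg)
			continue;
		if (is_2ghz)
			return is_40mhz ? tbl[i].v2g40 : tbl[i].v2g20;
		return is_40mhz ? tbl[i].v5g40 : tbl[i].v5g20;
	}
	return 0; /* like the driver: 0 when the register is not listed */
}

int main(void)
{
	printf("%#010x\n", demo_def_val(0x1c5844, 1, 0)); /* 2 GHz, HT20 */
	return 0;
}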
/*
* Initialize some PHY registers from the EEPROM values in
* modal_header[], according to band and bandwidth.
*/
static int carl9170_init_phy_from_eeprom(struct ar9170 *ar,
bool is_2ghz, bool is_40mhz)
{
static const u8 xpd2pd[16] = {
0x2, 0x2, 0x2, 0x1, 0x2, 0x2, 0x6, 0x2,
0x2, 0x3, 0x7, 0x2, 0xb, 0x2, 0x2, 0x2
};
/* pointer to the modal_header acc. to band */
struct ar9170_eeprom_modal *m = &ar->eeprom.modal_header[is_2ghz];
u32 val;
carl9170_regwrite_begin(ar);
/* ant common control (index 0) */
carl9170_regwrite(AR9170_PHY_REG_SWITCH_COM,
le32_to_cpu(m->antCtrlCommon));
/* ant control chain 0 (index 1) */
carl9170_regwrite(AR9170_PHY_REG_SWITCH_CHAIN_0,
le32_to_cpu(m->antCtrlChain[0]));
/* ant control chain 2 (index 2) */
carl9170_regwrite(AR9170_PHY_REG_SWITCH_CHAIN_2,
le32_to_cpu(m->antCtrlChain[1]));
/* SwSettle (index 3) */
if (!is_40mhz) {
val = carl9170_def_val(AR9170_PHY_REG_SETTLING,
is_2ghz, is_40mhz);
SET_VAL(AR9170_PHY_SETTLING_SWITCH, val, m->switchSettling);
carl9170_regwrite(AR9170_PHY_REG_SETTLING, val);
}
/* adcDesired, pdaDesired (index 4) */
val = carl9170_def_val(AR9170_PHY_REG_DESIRED_SZ, is_2ghz, is_40mhz);
SET_VAL(AR9170_PHY_DESIRED_SZ_PGA, val, m->pgaDesiredSize);
SET_VAL(AR9170_PHY_DESIRED_SZ_ADC, val, m->adcDesiredSize);
carl9170_regwrite(AR9170_PHY_REG_DESIRED_SZ, val);
/* TxEndToXpaOff, TxFrameToXpaOn (index 5) */
val = carl9170_def_val(AR9170_PHY_REG_RF_CTL4, is_2ghz, is_40mhz);
SET_VAL(AR9170_PHY_RF_CTL4_TX_END_XPAB_OFF, val, m->txEndToXpaOff);
SET_VAL(AR9170_PHY_RF_CTL4_TX_END_XPAA_OFF, val, m->txEndToXpaOff);
SET_VAL(AR9170_PHY_RF_CTL4_FRAME_XPAB_ON, val, m->txFrameToXpaOn);
SET_VAL(AR9170_PHY_RF_CTL4_FRAME_XPAA_ON, val, m->txFrameToXpaOn);
carl9170_regwrite(AR9170_PHY_REG_RF_CTL4, val);
/* TxEndToRxOn (index 6) */
val = carl9170_def_val(AR9170_PHY_REG_RF_CTL3, is_2ghz, is_40mhz);
SET_VAL(AR9170_PHY_RF_CTL3_TX_END_TO_A2_RX_ON, val, m->txEndToRxOn);
carl9170_regwrite(AR9170_PHY_REG_RF_CTL3, val);
/* thresh62 (index 7) */
val = carl9170_def_val(0x1c8864, is_2ghz, is_40mhz);
val = (val & ~0x7f000) | (m->thresh62 << 12);
carl9170_regwrite(0x1c8864, val);
/* tx/rx attenuation chain 0 (index 8) */
val = carl9170_def_val(AR9170_PHY_REG_RXGAIN, is_2ghz, is_40mhz);
SET_VAL(AR9170_PHY_RXGAIN_TXRX_ATTEN, val, m->txRxAttenCh[0]);
carl9170_regwrite(AR9170_PHY_REG_RXGAIN, val);
/* tx/rx attenuation chain 2 (index 9) */
val = carl9170_def_val(AR9170_PHY_REG_RXGAIN_CHAIN_2,
is_2ghz, is_40mhz);
SET_VAL(AR9170_PHY_RXGAIN_TXRX_ATTEN, val, m->txRxAttenCh[1]);
carl9170_regwrite(AR9170_PHY_REG_RXGAIN_CHAIN_2, val);
/* tx/rx margin chain 0 (index 10) */
val = carl9170_def_val(AR9170_PHY_REG_GAIN_2GHZ, is_2ghz, is_40mhz);
SET_VAL(AR9170_PHY_GAIN_2GHZ_RXTX_MARGIN, val, m->rxTxMarginCh[0]);
/* bsw margin chain 0 for 5GHz only */
if (!is_2ghz)
SET_VAL(AR9170_PHY_GAIN_2GHZ_BSW_MARGIN, val, m->bswMargin[0]);
carl9170_regwrite(AR9170_PHY_REG_GAIN_2GHZ, val);
/* tx/rx margin chain 2 (index 11) */
val = carl9170_def_val(AR9170_PHY_REG_GAIN_2GHZ_CHAIN_2,
is_2ghz, is_40mhz);
SET_VAL(AR9170_PHY_GAIN_2GHZ_RXTX_MARGIN, val, m->rxTxMarginCh[1]);
carl9170_regwrite(AR9170_PHY_REG_GAIN_2GHZ_CHAIN_2, val);
	/* iqCalI, iqCalQ chain 0 (index 12) */
val = carl9170_def_val(AR9170_PHY_REG_TIMING_CTRL4(0),
is_2ghz, is_40mhz);
SET_VAL(AR9170_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF, val, m->iqCalICh[0]);
SET_VAL(AR9170_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF, val, m->iqCalQCh[0]);
carl9170_regwrite(AR9170_PHY_REG_TIMING_CTRL4(0), val);
	/* iqCalI, iqCalQ chain 2 (index 13) */
val = carl9170_def_val(AR9170_PHY_REG_TIMING_CTRL4(2),
is_2ghz, is_40mhz);
SET_VAL(AR9170_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF, val, m->iqCalICh[1]);
SET_VAL(AR9170_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF, val, m->iqCalQCh[1]);
carl9170_regwrite(AR9170_PHY_REG_TIMING_CTRL4(2), val);
/* xpd gain mask (index 14) */
val = carl9170_def_val(AR9170_PHY_REG_TPCRG1, is_2ghz, is_40mhz);
SET_VAL(AR9170_PHY_TPCRG1_PD_GAIN_1, val,
xpd2pd[m->xpdGain & 0xf] & 3);
SET_VAL(AR9170_PHY_TPCRG1_PD_GAIN_2, val,
xpd2pd[m->xpdGain & 0xf] >> 2);
carl9170_regwrite(AR9170_PHY_REG_TPCRG1, val);
carl9170_regwrite(AR9170_PHY_REG_RX_CHAINMASK, ar->eeprom.rx_mask);
carl9170_regwrite(AR9170_PHY_REG_CAL_CHAINMASK, ar->eeprom.rx_mask);
carl9170_regwrite_finish();
return carl9170_regwrite_result();
}
static int carl9170_init_phy(struct ar9170 *ar, enum nl80211_band band)
{
int i, err;
u32 val;
bool is_2ghz = band == NL80211_BAND_2GHZ;
bool is_40mhz = conf_is_ht40(&ar->hw->conf);
carl9170_regwrite_begin(ar);
for (i = 0; i < ARRAY_SIZE(ar5416_phy_init); i++) {
if (is_40mhz) {
if (is_2ghz)
val = ar5416_phy_init[i]._2ghz_40;
else
val = ar5416_phy_init[i]._5ghz_40;
} else {
if (is_2ghz)
val = ar5416_phy_init[i]._2ghz_20;
else
val = ar5416_phy_init[i]._5ghz_20;
}
carl9170_regwrite(ar5416_phy_init[i].reg, val);
}
carl9170_regwrite_finish();
err = carl9170_regwrite_result();
if (err)
return err;
err = carl9170_init_phy_from_eeprom(ar, is_2ghz, is_40mhz);
if (err)
return err;
err = carl9170_init_power_cal(ar);
if (err)
return err;
if (!ar->fw.hw_counters) {
err = carl9170_write_reg(ar, AR9170_PWR_REG_PLL_ADDAC,
is_2ghz ? 0x5163 : 0x5143);
}
return err;
}
struct carl9170_rf_initvals {
u32 reg, _5ghz, _2ghz;
};
static struct carl9170_rf_initvals carl9170_rf_initval[] = {
/* bank 0 */
{ 0x1c58b0, 0x1e5795e5, 0x1e5795e5},
{ 0x1c58e0, 0x02008020, 0x02008020},
/* bank 1 */
{ 0x1c58b0, 0x02108421, 0x02108421},
{ 0x1c58ec, 0x00000008, 0x00000008},
/* bank 2 */
{ 0x1c58b0, 0x0e73ff17, 0x0e73ff17},
{ 0x1c58e0, 0x00000420, 0x00000420},
/* bank 3 */
{ 0x1c58f0, 0x01400018, 0x01c00018},
/* bank 4 */
{ 0x1c58b0, 0x000001a1, 0x000001a1},
{ 0x1c58e8, 0x00000001, 0x00000001},
/* bank 5 */
{ 0x1c58b0, 0x00000013, 0x00000013},
{ 0x1c58e4, 0x00000002, 0x00000002},
/* bank 6 */
{ 0x1c58b0, 0x00000000, 0x00000000},
{ 0x1c58b0, 0x00000000, 0x00000000},
{ 0x1c58b0, 0x00000000, 0x00000000},
{ 0x1c58b0, 0x00000000, 0x00000000},
{ 0x1c58b0, 0x00000000, 0x00000000},
{ 0x1c58b0, 0x00004000, 0x00004000},
{ 0x1c58b0, 0x00006c00, 0x00006c00},
{ 0x1c58b0, 0x00002c00, 0x00002c00},
{ 0x1c58b0, 0x00004800, 0x00004800},
{ 0x1c58b0, 0x00004000, 0x00004000},
{ 0x1c58b0, 0x00006000, 0x00006000},
{ 0x1c58b0, 0x00001000, 0x00001000},
{ 0x1c58b0, 0x00004000, 0x00004000},
{ 0x1c58b0, 0x00007c00, 0x00007c00},
{ 0x1c58b0, 0x00007c00, 0x00007c00},
{ 0x1c58b0, 0x00007c00, 0x00007c00},
{ 0x1c58b0, 0x00007c00, 0x00007c00},
{ 0x1c58b0, 0x00007c00, 0x00007c00},
{ 0x1c58b0, 0x00087c00, 0x00087c00},
{ 0x1c58b0, 0x00007c00, 0x00007c00},
{ 0x1c58b0, 0x00005400, 0x00005400},
{ 0x1c58b0, 0x00000c00, 0x00000c00},
{ 0x1c58b0, 0x00001800, 0x00001800},
{ 0x1c58b0, 0x00007c00, 0x00007c00},
{ 0x1c58b0, 0x00006c00, 0x00006c00},
{ 0x1c58b0, 0x00006c00, 0x00006c00},
{ 0x1c58b0, 0x00007c00, 0x00007c00},
{ 0x1c58b0, 0x00002c00, 0x00002c00},
{ 0x1c58b0, 0x00003c00, 0x00003c00},
{ 0x1c58b0, 0x00003800, 0x00003800},
{ 0x1c58b0, 0x00001c00, 0x00001c00},
{ 0x1c58b0, 0x00000800, 0x00000800},
{ 0x1c58b0, 0x00000408, 0x00000408},
{ 0x1c58b0, 0x00004c15, 0x00004c15},
{ 0x1c58b0, 0x00004188, 0x00004188},
{ 0x1c58b0, 0x0000201e, 0x0000201e},
{ 0x1c58b0, 0x00010408, 0x00010408},
{ 0x1c58b0, 0x00000801, 0x00000801},
{ 0x1c58b0, 0x00000c08, 0x00000c08},
{ 0x1c58b0, 0x0000181e, 0x0000181e},
{ 0x1c58b0, 0x00001016, 0x00001016},
{ 0x1c58b0, 0x00002800, 0x00002800},
{ 0x1c58b0, 0x00004010, 0x00004010},
{ 0x1c58b0, 0x0000081c, 0x0000081c},
{ 0x1c58b0, 0x00000115, 0x00000115},
{ 0x1c58b0, 0x00000015, 0x00000015},
{ 0x1c58b0, 0x00000066, 0x00000066},
{ 0x1c58b0, 0x0000001c, 0x0000001c},
{ 0x1c58b0, 0x00000000, 0x00000000},
{ 0x1c58b0, 0x00000004, 0x00000004},
{ 0x1c58b0, 0x00000015, 0x00000015},
{ 0x1c58b0, 0x0000001f, 0x0000001f},
{ 0x1c58e0, 0x00000000, 0x00000400},
/* bank 7 */
{ 0x1c58b0, 0x000000a0, 0x000000a0},
{ 0x1c58b0, 0x00000000, 0x00000000},
{ 0x1c58b0, 0x00000040, 0x00000040},
{ 0x1c58f0, 0x0000001c, 0x0000001c},
};
static int carl9170_init_rf_banks_0_7(struct ar9170 *ar, bool band5ghz)
{
int err, i;
carl9170_regwrite_begin(ar);
for (i = 0; i < ARRAY_SIZE(carl9170_rf_initval); i++)
carl9170_regwrite(carl9170_rf_initval[i].reg,
band5ghz ? carl9170_rf_initval[i]._5ghz
: carl9170_rf_initval[i]._2ghz);
carl9170_regwrite_finish();
err = carl9170_regwrite_result();
if (err)
wiphy_err(ar->hw->wiphy, "rf init failed\n");
return err;
}
struct carl9170_phy_freq_params {
u8 coeff_exp;
u16 coeff_man;
u8 coeff_exp_shgi;
u16 coeff_man_shgi;
};
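/*
 * Per-channel delta-slope coefficients (exponent/mantissa pairs, plus
 * short-GI variants) that carl9170_set_channel() passes to the firmware.
 */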
enum carl9170_bw {
CARL9170_BW_20,
CARL9170_BW_40_BELOW,
CARL9170_BW_40_ABOVE,
__CARL9170_NUM_BW,
};
struct carl9170_phy_freq_entry {
u16 freq;
struct carl9170_phy_freq_params params[__CARL9170_NUM_BW];
};
/* NB: must be in sync with channel tables in main! */
static const struct carl9170_phy_freq_entry carl9170_phy_freq_params[] = {
	/*
	 * freq,
	 * 20 MHz,
	 * 40 MHz (below),
	 * 40 MHz (above),
	 */
{ 2412, {
{ 3, 21737, 3, 19563, },
{ 3, 21827, 3, 19644, },
{ 3, 21647, 3, 19482, },
} },
{ 2417, {
{ 3, 21692, 3, 19523, },
{ 3, 21782, 3, 19604, },
{ 3, 21602, 3, 19442, },
} },
{ 2422, {
{ 3, 21647, 3, 19482, },
{ 3, 21737, 3, 19563, },
{ 3, 21558, 3, 19402, },
} },
{ 2427, {
{ 3, 21602, 3, 19442, },
{ 3, 21692, 3, 19523, },
{ 3, 21514, 3, 19362, },
} },
{ 2432, {
{ 3, 21558, 3, 19402, },
{ 3, 21647, 3, 19482, },
{ 3, 21470, 3, 19323, },
} },
{ 2437, {
{ 3, 21514, 3, 19362, },
{ 3, 21602, 3, 19442, },
{ 3, 21426, 3, 19283, },
} },
{ 2442, {
{ 3, 21470, 3, 19323, },
{ 3, 21558, 3, 19402, },
{ 3, 21382, 3, 19244, },
} },
{ 2447, {
{ 3, 21426, 3, 19283, },
{ 3, 21514, 3, 19362, },
{ 3, 21339, 3, 19205, },
} },
{ 2452, {
{ 3, 21382, 3, 19244, },
{ 3, 21470, 3, 19323, },
{ 3, 21295, 3, 19166, },
} },
{ 2457, {
{ 3, 21339, 3, 19205, },
{ 3, 21426, 3, 19283, },
{ 3, 21252, 3, 19127, },
} },
{ 2462, {
{ 3, 21295, 3, 19166, },
{ 3, 21382, 3, 19244, },
{ 3, 21209, 3, 19088, },
} },
{ 2467, {
{ 3, 21252, 3, 19127, },
{ 3, 21339, 3, 19205, },
{ 3, 21166, 3, 19050, },
} },
{ 2472, {
{ 3, 21209, 3, 19088, },
{ 3, 21295, 3, 19166, },
{ 3, 21124, 3, 19011, },
} },
{ 2484, {
{ 3, 21107, 3, 18996, },
{ 3, 21192, 3, 19073, },
{ 3, 21022, 3, 18920, },
} },
{ 4920, {
{ 4, 21313, 4, 19181, },
{ 4, 21356, 4, 19220, },
{ 4, 21269, 4, 19142, },
} },
{ 4940, {
{ 4, 21226, 4, 19104, },
{ 4, 21269, 4, 19142, },
{ 4, 21183, 4, 19065, },
} },
{ 4960, {
{ 4, 21141, 4, 19027, },
{ 4, 21183, 4, 19065, },
{ 4, 21098, 4, 18988, },
} },
{ 4980, {
{ 4, 21056, 4, 18950, },
{ 4, 21098, 4, 18988, },
{ 4, 21014, 4, 18912, },
} },
{ 5040, {
{ 4, 20805, 4, 18725, },
{ 4, 20846, 4, 18762, },
{ 4, 20764, 4, 18687, },
} },
{ 5060, {
{ 4, 20723, 4, 18651, },
{ 4, 20764, 4, 18687, },
{ 4, 20682, 4, 18614, },
} },
{ 5080, {
{ 4, 20641, 4, 18577, },
{ 4, 20682, 4, 18614, },
{ 4, 20601, 4, 18541, },
} },
{ 5180, {
{ 4, 20243, 4, 18219, },
{ 4, 20282, 4, 18254, },
{ 4, 20204, 4, 18183, },
} },
{ 5200, {
{ 4, 20165, 4, 18148, },
{ 4, 20204, 4, 18183, },
{ 4, 20126, 4, 18114, },
} },
{ 5220, {
{ 4, 20088, 4, 18079, },
{ 4, 20126, 4, 18114, },
{ 4, 20049, 4, 18044, },
} },
{ 5240, {
{ 4, 20011, 4, 18010, },
{ 4, 20049, 4, 18044, },
{ 4, 19973, 4, 17976, },
} },
{ 5260, {
{ 4, 19935, 4, 17941, },
{ 4, 19973, 4, 17976, },
{ 4, 19897, 4, 17907, },
} },
{ 5280, {
{ 4, 19859, 4, 17873, },
{ 4, 19897, 4, 17907, },
{ 4, 19822, 4, 17840, },
} },
{ 5300, {
{ 4, 19784, 4, 17806, },
{ 4, 19822, 4, 17840, },
{ 4, 19747, 4, 17772, },
} },
{ 5320, {
{ 4, 19710, 4, 17739, },
{ 4, 19747, 4, 17772, },
{ 4, 19673, 4, 17706, },
} },
{ 5500, {
{ 4, 19065, 4, 17159, },
{ 4, 19100, 4, 17190, },
{ 4, 19030, 4, 17127, },
} },
{ 5520, {
{ 4, 18996, 4, 17096, },
{ 4, 19030, 4, 17127, },
{ 4, 18962, 4, 17065, },
} },
{ 5540, {
{ 4, 18927, 4, 17035, },
{ 4, 18962, 4, 17065, },
{ 4, 18893, 4, 17004, },
} },
{ 5560, {
{ 4, 18859, 4, 16973, },
{ 4, 18893, 4, 17004, },
{ 4, 18825, 4, 16943, },
} },
{ 5580, {
{ 4, 18792, 4, 16913, },
{ 4, 18825, 4, 16943, },
{ 4, 18758, 4, 16882, },
} },
{ 5600, {
{ 4, 18725, 4, 16852, },
{ 4, 18758, 4, 16882, },
{ 4, 18691, 4, 16822, },
} },
{ 5620, {
{ 4, 18658, 4, 16792, },
{ 4, 18691, 4, 16822, },
{ 4, 18625, 4, 16762, },
} },
{ 5640, {
{ 4, 18592, 4, 16733, },
{ 4, 18625, 4, 16762, },
{ 4, 18559, 4, 16703, },
} },
{ 5660, {
{ 4, 18526, 4, 16673, },
{ 4, 18559, 4, 16703, },
{ 4, 18493, 4, 16644, },
} },
{ 5680, {
{ 4, 18461, 4, 16615, },
{ 4, 18493, 4, 16644, },
{ 4, 18428, 4, 16586, },
} },
{ 5700, {
{ 4, 18396, 4, 16556, },
{ 4, 18428, 4, 16586, },
{ 4, 18364, 4, 16527, },
} },
{ 5745, {
{ 4, 18252, 4, 16427, },
{ 4, 18284, 4, 16455, },
{ 4, 18220, 4, 16398, },
} },
{ 5765, {
{ 4, 18189, 5, 32740, },
{ 4, 18220, 4, 16398, },
{ 4, 18157, 5, 32683, },
} },
{ 5785, {
{ 4, 18126, 5, 32626, },
{ 4, 18157, 5, 32683, },
{ 4, 18094, 5, 32570, },
} },
{ 5805, {
{ 4, 18063, 5, 32514, },
{ 4, 18094, 5, 32570, },
{ 4, 18032, 5, 32458, },
} },
{ 5825, {
{ 4, 18001, 5, 32402, },
{ 4, 18032, 5, 32458, },
{ 4, 17970, 5, 32347, },
} },
{ 5170, {
{ 4, 20282, 4, 18254, },
{ 4, 20321, 4, 18289, },
{ 4, 20243, 4, 18219, },
} },
{ 5190, {
{ 4, 20204, 4, 18183, },
{ 4, 20243, 4, 18219, },
{ 4, 20165, 4, 18148, },
} },
{ 5210, {
{ 4, 20126, 4, 18114, },
{ 4, 20165, 4, 18148, },
{ 4, 20088, 4, 18079, },
} },
{ 5230, {
{ 4, 20049, 4, 18044, },
{ 4, 20088, 4, 18079, },
{ 4, 20011, 4, 18010, },
} },
};
static int carl9170_init_rf_bank4_pwr(struct ar9170 *ar, bool band5ghz,
u32 freq, enum carl9170_bw bw)
{
int err;
u32 d0, d1, td0, td1, fd0, fd1;
u8 chansel;
u8 refsel0 = 1, refsel1 = 0;
u8 lf_synth = 0;
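	/*
	 * For HT40, shift the synthesizer frequency by 10 MHz so it sits
	 * at the centre of the 40 MHz channel pair.
	 */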
switch (bw) {
case CARL9170_BW_40_ABOVE:
freq += 10;
break;
case CARL9170_BW_40_BELOW:
freq -= 10;
break;
case CARL9170_BW_20:
break;
default:
BUG();
return -ENOSYS;
}
if (band5ghz) {
if (freq % 10) {
chansel = (freq - 4800) / 5;
} else {
chansel = ((freq - 4800) / 10) * 2;
refsel0 = 0;
refsel1 = 1;
}
chansel = bitrev8(chansel);
} else {
		if (freq == 2484) {
			chansel = 10 + (freq - 2274) / 5;
			lf_synth = 1;
		} else {
			chansel = 16 + (freq - 2272) / 5;
		}
chansel *= 4;
chansel = bitrev8(chansel);
}
d1 = chansel;
d0 = 0x21 |
refsel0 << 3 |
refsel1 << 2 |
lf_synth << 1;
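	/*
	 * Split d0/d1 across the two bank-4 words: fd0 carries the low five
	 * bits of each (d1 in bits 9:5, d0 in bits 4:0), fd1 the remaining
	 * high three bits in the same layout.
	 */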
td0 = d0 & 0x1f;
td1 = d1 & 0x1f;
fd0 = td1 << 5 | td0;
td0 = (d0 >> 5) & 0x7;
td1 = (d1 >> 5) & 0x7;
fd1 = td1 << 5 | td0;
carl9170_regwrite_begin(ar);
carl9170_regwrite(0x1c58b0, fd0);
carl9170_regwrite(0x1c58e8, fd1);
carl9170_regwrite_finish();
err = carl9170_regwrite_result();
if (err)
return err;
return 0;
}
static const struct carl9170_phy_freq_params *
carl9170_get_hw_dyn_params(struct ieee80211_channel *channel,
enum carl9170_bw bw)
{
unsigned int chanidx = 0;
u16 freq = 2412;
if (channel) {
chanidx = channel->hw_value;
freq = channel->center_freq;
}
BUG_ON(chanidx >= ARRAY_SIZE(carl9170_phy_freq_params));
BUILD_BUG_ON(__CARL9170_NUM_BW != 3);
WARN_ON(carl9170_phy_freq_params[chanidx].freq != freq);
return &carl9170_phy_freq_params[chanidx].params[bw];
}
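/*
 * Return the index of the last entry in freqs[] (expected to be sorted in
 * ascending order) that is <= f, capped at nfreqs - 2 so that callers can
 * always interpolate between idx and idx + 1.
 */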
static int carl9170_find_freq_idx(int nfreqs, u8 *freqs, u8 f)
{
int idx = nfreqs - 2;
while (idx >= 0) {
if (f >= freqs[idx])
return idx;
idx--;
}
return 0;
}
static s32 carl9170_interpolate_s32(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
{
/* nothing to interpolate, it's horizontal */
if (y2 == y1)
return y1;
/* check if we hit one of the edges */
if (x == x1)
return y1;
if (x == x2)
return y2;
	/*
	 * x1 == x2 would divide by zero; the edge checks above should
	 * already have caught x == x1 == x2, so just return y1
	 */
if (x2 == x1)
return y1;
return y1 + (((y2 - y1) * (x - x1)) / (x2 - x1));
}
static u8 carl9170_interpolate_u8(u8 x, u8 x1, u8 y1, u8 x2, u8 y2)
{
#define SHIFT 8
s32 y;
y = carl9170_interpolate_s32(x << SHIFT, x1 << SHIFT,
y1 << SHIFT, x2 << SHIFT, y2 << SHIFT);
	/*
	 * Round the fixed-point result to the nearest integer: the second
	 * term adds one whenever the top fractional bit is set (e.g. for
	 * SHIFT = 8, y = 0x180, i.e. 1.5, yields 2). Note this rounds to
	 * nearest, it is not DIV_ROUND_UP().
	 */
return (y >> SHIFT) + ((y & (1 << (SHIFT - 1))) >> (SHIFT - 1));
#undef SHIFT
}
static u8 carl9170_interpolate_val(u8 x, u8 *x_array, u8 *y_array)
{
int i;
for (i = 0; i < 3; i++) {
if (x <= x_array[i + 1])
break;
}
return carl9170_interpolate_u8(x, x_array[i], y_array[i],
x_array[i + 1], y_array[i + 1]);
}
static int carl9170_set_freq_cal_data(struct ar9170 *ar,
struct ieee80211_channel *channel)
{
u8 *cal_freq_pier;
u8 vpds[2][AR5416_PD_GAIN_ICEPTS];
u8 pwrs[2][AR5416_PD_GAIN_ICEPTS];
int chain, idx, i;
u32 phy_data = 0;
u8 f, tmp;
switch (channel->band) {
case NL80211_BAND_2GHZ:
f = channel->center_freq - 2300;
cal_freq_pier = ar->eeprom.cal_freq_pier_2G;
i = AR5416_NUM_2G_CAL_PIERS - 1;
break;
case NL80211_BAND_5GHZ:
f = (channel->center_freq - 4800) / 5;
cal_freq_pier = ar->eeprom.cal_freq_pier_5G;
i = AR5416_NUM_5G_CAL_PIERS - 1;
break;
default:
return -EINVAL;
}
for (; i >= 0; i--) {
if (cal_freq_pier[i] != 0xff)
break;
}
if (i < 0)
return -EINVAL;
idx = carl9170_find_freq_idx(i, cal_freq_pier, f);
carl9170_regwrite_begin(ar);
for (chain = 0; chain < AR5416_MAX_CHAINS; chain++) {
for (i = 0; i < AR5416_PD_GAIN_ICEPTS; i++) {
struct ar9170_calibration_data_per_freq *cal_pier_data;
int j;
switch (channel->band) {
case NL80211_BAND_2GHZ:
cal_pier_data = &ar->eeprom.
cal_pier_data_2G[chain][idx];
break;
case NL80211_BAND_5GHZ:
cal_pier_data = &ar->eeprom.
cal_pier_data_5G[chain][idx];
break;
default:
return -EINVAL;
}
for (j = 0; j < 2; j++) {
vpds[j][i] = carl9170_interpolate_u8(f,
cal_freq_pier[idx],
cal_pier_data->vpd_pdg[j][i],
cal_freq_pier[idx + 1],
cal_pier_data[1].vpd_pdg[j][i]);
pwrs[j][i] = carl9170_interpolate_u8(f,
cal_freq_pier[idx],
cal_pier_data->pwr_pdg[j][i],
cal_freq_pier[idx + 1],
cal_pier_data[1].pwr_pdg[j][i]) / 2;
}
}
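		/*
		 * Fill the 76-entry power detector table, packing four
		 * interpolated bytes into each 32-bit register write:
		 * entries 0-24 come from the pd gain 0 curve, the rest from
		 * the pd gain 1 curve with its index offset by 12.
		 */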
for (i = 0; i < 76; i++) {
if (i < 25) {
tmp = carl9170_interpolate_val(i, &pwrs[0][0],
&vpds[0][0]);
} else {
tmp = carl9170_interpolate_val(i - 12,
&pwrs[1][0],
&vpds[1][0]);
}
phy_data |= tmp << ((i & 3) << 3);
if ((i & 3) == 3) {
carl9170_regwrite(0x1c6280 + chain * 0x1000 +
(i & ~3), phy_data);
phy_data = 0;
}
}
for (i = 19; i < 32; i++)
carl9170_regwrite(0x1c6280 + chain * 0x1000 + (i << 2),
0x0);
}
carl9170_regwrite_finish();
return carl9170_regwrite_result();
}
static u8 carl9170_get_max_edge_power(struct ar9170 *ar,
u32 freq, struct ar9170_calctl_edges edges[])
{
int i;
u8 rc = AR5416_MAX_RATE_POWER;
u8 f;
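	/*
	 * Convert the centre frequency to the EEPROM edge-channel encoding:
	 * freq - 2300 for 2.4 GHz, (freq - 4800) / 5 for 5 GHz.
	 */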
if (freq < 3000)
f = freq - 2300;
else
f = (freq - 4800) / 5;
for (i = 0; i < AR5416_NUM_BAND_EDGES; i++) {
if (edges[i].channel == 0xff)
break;
if (f == edges[i].channel) {
/* exact freq match */
rc = edges[i].power_flags & ~AR9170_CALCTL_EDGE_FLAGS;
break;
}
if (i > 0 && f < edges[i].channel) {
if (f > edges[i - 1].channel &&
edges[i - 1].power_flags &
AR9170_CALCTL_EDGE_FLAGS) {
/* lower channel has the inband flag set */
rc = edges[i - 1].power_flags &
~AR9170_CALCTL_EDGE_FLAGS;
}
break;
}
}
if (i == AR5416_NUM_BAND_EDGES) {
if (f > edges[i - 1].channel &&
edges[i - 1].power_flags & AR9170_CALCTL_EDGE_FLAGS) {
/* lower channel has the inband flag set */
rc = edges[i - 1].power_flags &
~AR9170_CALCTL_EDGE_FLAGS;
}
}
return rc;
}
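/*
 * Build the heavy clip parameter: the upper nibble flags 40 MHz operation,
 * the lower nibble is set when the matching band edge lacks the inband
 * flag. carl9170_calc_ctl() later adjusts the 2G HT power tables based on
 * these two nibbles.
 */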
static u8 carl9170_get_heavy_clip(struct ar9170 *ar, u32 freq,
enum carl9170_bw bw, struct ar9170_calctl_edges edges[])
{
u8 f;
int i;
u8 rc = 0;
if (freq < 3000)
f = freq - 2300;
else
f = (freq - 4800) / 5;
if (bw == CARL9170_BW_40_BELOW || bw == CARL9170_BW_40_ABOVE)
rc |= 0xf0;
for (i = 0; i < AR5416_NUM_BAND_EDGES; i++) {
if (edges[i].channel == 0xff)
break;
if (f == edges[i].channel) {
if (!(edges[i].power_flags & AR9170_CALCTL_EDGE_FLAGS))
rc |= 0x0f;
break;
}
}
return rc;
}
/*
* calculate the conformance test limits and the heavy clip parameter
* and apply them to ar->power* (derived from otus hal/hpmain.c, line 3706)
*/
static void carl9170_calc_ctl(struct ar9170 *ar, u32 freq, enum carl9170_bw bw)
{
u8 ctl_grp; /* CTL group */
u8 ctl_idx; /* CTL index */
int i, j;
struct ctl_modes {
u8 ctl_mode;
u8 max_power;
u8 *pwr_cal_data;
int pwr_cal_len;
} *modes;
/*
* order is relevant in the mode_list_*: we fall back to the
 * lower indices if any mode is missing from the EEPROM.
*/
struct ctl_modes mode_list_2ghz[] = {
{ CTL_11B, 0, ar->power_2G_cck, 4 },
{ CTL_11G, 0, ar->power_2G_ofdm, 4 },
{ CTL_2GHT20, 0, ar->power_2G_ht20, 8 },
{ CTL_2GHT40, 0, ar->power_2G_ht40, 8 },
};
struct ctl_modes mode_list_5ghz[] = {
{ CTL_11A, 0, ar->power_5G_leg, 4 },
{ CTL_5GHT20, 0, ar->power_5G_ht20, 8 },
{ CTL_5GHT40, 0, ar->power_5G_ht40, 8 },
};
int nr_modes;
#define EDGES(c, n) (ar->eeprom.ctl_data[c].control_edges[n])
ar->heavy_clip = 0;
/*
* TODO: investigate the differences between OTUS'
* hpreg.c::zfHpGetRegulatoryDomain() and
* ath/regd.c::ath_regd_get_band_ctl() -
* e.g. for FCC3_WORLD the OTUS procedure
* always returns CTL_FCC, while the one in ath/ delivers
* CTL_ETSI for 2GHz and CTL_FCC for 5GHz.
*/
ctl_grp = ath_regd_get_band_ctl(&ar->common.regulatory,
ar->hw->conf.chandef.chan->band);
	/* ctl group not found - either invalid band (NO_CTL) or world-wide roaming */
if (ctl_grp == NO_CTL || ctl_grp == SD_NO_CTL)
ctl_grp = CTL_FCC;
if (ctl_grp != CTL_FCC)
/* skip CTL and heavy clip for CTL_MKK and CTL_ETSI */
return;
if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) {
modes = mode_list_2ghz;
nr_modes = ARRAY_SIZE(mode_list_2ghz);
} else {
modes = mode_list_5ghz;
nr_modes = ARRAY_SIZE(mode_list_5ghz);
}
for (i = 0; i < nr_modes; i++) {
u8 c = ctl_grp | modes[i].ctl_mode;
for (ctl_idx = 0; ctl_idx < AR5416_NUM_CTLS; ctl_idx++)
if (c == ar->eeprom.ctl_index[ctl_idx])
break;
if (ctl_idx < AR5416_NUM_CTLS) {
int f_off = 0;
/*
* determine heavy clip parameter
* from the 11G edges array
*/
if (modes[i].ctl_mode == CTL_11G) {
ar->heavy_clip =
carl9170_get_heavy_clip(ar,
freq, bw, EDGES(ctl_idx, 1));
}
/* adjust freq for 40MHz */
if (modes[i].ctl_mode == CTL_2GHT40 ||
modes[i].ctl_mode == CTL_5GHT40) {
if (bw == CARL9170_BW_40_BELOW)
f_off = -10;
else
f_off = 10;
}
modes[i].max_power =
carl9170_get_max_edge_power(ar,
freq + f_off, EDGES(ctl_idx, 1));
/*
* TODO: check if the regulatory max. power is
* controlled by cfg80211 for DFS.
* (hpmain applies it to max_power itself for DFS freq)
*/
} else {
/*
* Workaround in otus driver, hpmain.c, line 3906:
* if no data for 5GHT20 are found, take the
			 * legacy 5G value. We extend this here to fall back
			 * from any other HT* or 11G mode, too.
*/
int k = i;
modes[i].max_power = AR5416_MAX_RATE_POWER;
while (k-- > 0) {
if (modes[k].max_power !=
AR5416_MAX_RATE_POWER) {
modes[i].max_power = modes[k].max_power;
break;
}
}
}
/* apply max power to pwr_cal_data (ar->power_*) */
for (j = 0; j < modes[i].pwr_cal_len; j++) {
modes[i].pwr_cal_data[j] = min(modes[i].pwr_cal_data[j],
modes[i].max_power);
}
}
if (ar->heavy_clip & 0xf0) {
ar->power_2G_ht40[0]--;
ar->power_2G_ht40[1]--;
ar->power_2G_ht40[2]--;
}
if (ar->heavy_clip & 0xf) {
ar->power_2G_ht20[0]++;
ar->power_2G_ht20[1]++;
ar->power_2G_ht20[2]++;
}
#undef EDGES
}
static void carl9170_set_power_cal(struct ar9170 *ar, u32 freq,
enum carl9170_bw bw)
{
struct ar9170_calibration_target_power_legacy *ctpl;
struct ar9170_calibration_target_power_ht *ctph;
u8 *ctpres;
int ntargets;
int idx, i, n;
u8 f;
u8 pwr_freqs[AR5416_MAX_NUM_TGT_PWRS];
if (freq < 3000)
f = freq - 2300;
else
f = (freq - 4800) / 5;
/*
* cycle through the various modes
*
* legacy modes first: 5G, 2G CCK, 2G OFDM
*/
for (i = 0; i < 3; i++) {
switch (i) {
case 0: /* 5 GHz legacy */
ctpl = &ar->eeprom.cal_tgt_pwr_5G[0];
ntargets = AR5416_NUM_5G_TARGET_PWRS;
ctpres = ar->power_5G_leg;
break;
case 1: /* 2.4 GHz CCK */
ctpl = &ar->eeprom.cal_tgt_pwr_2G_cck[0];
ntargets = AR5416_NUM_2G_CCK_TARGET_PWRS;
ctpres = ar->power_2G_cck;
break;
case 2: /* 2.4 GHz OFDM */
ctpl = &ar->eeprom.cal_tgt_pwr_2G_ofdm[0];
ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
ctpres = ar->power_2G_ofdm;
break;
default:
BUG();
}
for (n = 0; n < ntargets; n++) {
if (ctpl[n].freq == 0xff)
break;
pwr_freqs[n] = ctpl[n].freq;
}
ntargets = n;
idx = carl9170_find_freq_idx(ntargets, pwr_freqs, f);
for (n = 0; n < 4; n++)
ctpres[n] = carl9170_interpolate_u8(f,
ctpl[idx + 0].freq, ctpl[idx + 0].power[n],
ctpl[idx + 1].freq, ctpl[idx + 1].power[n]);
}
	/* now the HT modes: 5G HT20, 5G HT40, 2G HT20, 2G HT40 */
for (i = 0; i < 4; i++) {
switch (i) {
case 0: /* 5 GHz HT 20 */
ctph = &ar->eeprom.cal_tgt_pwr_5G_ht20[0];
ntargets = AR5416_NUM_5G_TARGET_PWRS;
ctpres = ar->power_5G_ht20;
break;
case 1: /* 5 GHz HT 40 */
ctph = &ar->eeprom.cal_tgt_pwr_5G_ht40[0];
ntargets = AR5416_NUM_5G_TARGET_PWRS;
ctpres = ar->power_5G_ht40;
break;
case 2: /* 2.4 GHz HT 20 */
ctph = &ar->eeprom.cal_tgt_pwr_2G_ht20[0];
ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
ctpres = ar->power_2G_ht20;
break;
case 3: /* 2.4 GHz HT 40 */
ctph = &ar->eeprom.cal_tgt_pwr_2G_ht40[0];
ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
ctpres = ar->power_2G_ht40;
break;
default:
BUG();
}
for (n = 0; n < ntargets; n++) {
if (ctph[n].freq == 0xff)
break;
pwr_freqs[n] = ctph[n].freq;
}
ntargets = n;
idx = carl9170_find_freq_idx(ntargets, pwr_freqs, f);
for (n = 0; n < 8; n++)
ctpres[n] = carl9170_interpolate_u8(f,
ctph[idx + 0].freq, ctph[idx + 0].power[n],
ctph[idx + 1].freq, ctph[idx + 1].power[n]);
}
/* calc. conformance test limits and apply to ar->power*[] */
carl9170_calc_ctl(ar, freq, bw);
}
int carl9170_get_noisefloor(struct ar9170 *ar)
{
static const u32 phy_regs[] = {
AR9170_PHY_REG_CCA, AR9170_PHY_REG_CH2_CCA,
AR9170_PHY_REG_EXT_CCA, AR9170_PHY_REG_CH2_EXT_CCA };
u32 phy_res[ARRAY_SIZE(phy_regs)];
int err, i;
BUILD_BUG_ON(ARRAY_SIZE(phy_regs) != ARRAY_SIZE(ar->noise));
err = carl9170_read_mreg(ar, ARRAY_SIZE(phy_regs), phy_regs, phy_res);
if (err)
return err;
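	/*
	 * phy_regs[] is ordered chain 0 / chain 2 CCA, then the chain 0 /
	 * chain 2 extension-channel CCA pair, so noise[0..1] hold the main
	 * channel readings and noise[2..3] the extension channel ones.
	 */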
for (i = 0; i < 2; i++) {
ar->noise[i] = sign_extend32(GET_VAL(
AR9170_PHY_CCA_MIN_PWR, phy_res[i]), 8);
ar->noise[i + 2] = sign_extend32(GET_VAL(
AR9170_PHY_EXT_CCA_MIN_PWR, phy_res[i + 2]), 8);
}
if (ar->channel)
ar->survey[ar->channel->hw_value].noise = ar->noise[0];
return 0;
}
static enum carl9170_bw nl80211_to_carl(enum nl80211_channel_type type)
{
switch (type) {
case NL80211_CHAN_NO_HT:
case NL80211_CHAN_HT20:
return CARL9170_BW_20;
case NL80211_CHAN_HT40MINUS:
return CARL9170_BW_40_BELOW;
case NL80211_CHAN_HT40PLUS:
return CARL9170_BW_40_ABOVE;
default:
BUG();
}
}
int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
enum nl80211_channel_type _bw)
{
const struct carl9170_phy_freq_params *freqpar;
struct carl9170_rf_init_result rf_res;
struct carl9170_rf_init rf;
u32 tmp, offs = 0, new_ht = 0;
int err;
enum carl9170_bw bw;
struct ieee80211_channel *old_channel = NULL;
bw = nl80211_to_carl(_bw);
if (conf_is_ht(&ar->hw->conf))
new_ht |= CARL9170FW_PHY_HT_ENABLE;
if (conf_is_ht40(&ar->hw->conf))
new_ht |= CARL9170FW_PHY_HT_DYN2040;
/* may be NULL at first setup */
if (ar->channel) {
old_channel = ar->channel;
ar->channel = NULL;
}
/* cold reset BB/ADDA */
err = carl9170_write_reg(ar, AR9170_PWR_REG_RESET,
AR9170_PWR_RESET_BB_COLD_RESET);
if (err)
return err;
err = carl9170_write_reg(ar, AR9170_PWR_REG_RESET, 0x0);
if (err)
return err;
err = carl9170_init_phy(ar, channel->band);
if (err)
return err;
err = carl9170_init_rf_banks_0_7(ar,
channel->band == NL80211_BAND_5GHZ);
if (err)
return err;
err = carl9170_exec_cmd(ar, CARL9170_CMD_FREQ_START, 0, NULL, 0, NULL);
if (err)
return err;
err = carl9170_write_reg(ar, AR9170_PHY_REG_HEAVY_CLIP_ENABLE,
0x200);
if (err)
return err;
err = carl9170_init_rf_bank4_pwr(ar,
channel->band == NL80211_BAND_5GHZ,
channel->center_freq, bw);
if (err)
return err;
tmp = AR9170_PHY_TURBO_FC_SINGLE_HT_LTF1 |
AR9170_PHY_TURBO_FC_HT_EN;
switch (bw) {
case CARL9170_BW_20:
break;
case CARL9170_BW_40_BELOW:
tmp |= AR9170_PHY_TURBO_FC_DYN2040_EN |
AR9170_PHY_TURBO_FC_SHORT_GI_40;
offs = 3;
break;
case CARL9170_BW_40_ABOVE:
tmp |= AR9170_PHY_TURBO_FC_DYN2040_EN |
AR9170_PHY_TURBO_FC_SHORT_GI_40 |
AR9170_PHY_TURBO_FC_DYN2040_PRI_CH;
offs = 1;
break;
default:
BUG();
return -ENOSYS;
}
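	/*
	 * offs encodes the HT40 secondary channel position; it is stored in
	 * the firmware's CARL9170FW_PHY_HT_EXT_CHAN_OFF field below.
	 */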
if (ar->eeprom.tx_mask != 1)
tmp |= AR9170_PHY_TURBO_FC_WALSH;
err = carl9170_write_reg(ar, AR9170_PHY_REG_TURBO, tmp);
if (err)
return err;
err = carl9170_set_freq_cal_data(ar, channel);
if (err)
return err;
carl9170_set_power_cal(ar, channel->center_freq, bw);
err = carl9170_set_mac_tpc(ar, channel);
if (err)
return err;
freqpar = carl9170_get_hw_dyn_params(channel, bw);
rf.ht_settings = new_ht;
if (conf_is_ht40(&ar->hw->conf))
SET_VAL(CARL9170FW_PHY_HT_EXT_CHAN_OFF, rf.ht_settings, offs);
rf.freq = cpu_to_le32(channel->center_freq * 1000);
rf.delta_slope_coeff_exp = cpu_to_le32(freqpar->coeff_exp);
rf.delta_slope_coeff_man = cpu_to_le32(freqpar->coeff_man);
rf.delta_slope_coeff_exp_shgi = cpu_to_le32(freqpar->coeff_exp_shgi);
rf.delta_slope_coeff_man_shgi = cpu_to_le32(freqpar->coeff_man_shgi);
rf.finiteLoopCount = cpu_to_le32(2000);
err = carl9170_exec_cmd(ar, CARL9170_CMD_RF_INIT, sizeof(rf), &rf,
sizeof(rf_res), &rf_res);
if (err)
return err;
err = le32_to_cpu(rf_res.ret);
if (err != 0) {
ar->chan_fail++;
ar->total_chan_fail++;
wiphy_err(ar->hw->wiphy, "channel change: %d -> %d "
"failed (%d).\n", old_channel ?
old_channel->center_freq : -1, channel->center_freq,
err);
if (ar->chan_fail > 3) {
/* We have tried very hard to change to _another_
* channel and we've failed to do so!
* Chances are that the PHY/RF is no longer
* operable (due to corruptions/fatal events/bugs?)
* and we need to reset at a higher level.
*/
carl9170_restart(ar, CARL9170_RR_TOO_MANY_PHY_ERRORS);
return 0;
}
err = carl9170_set_channel(ar, channel, _bw);
if (err)
return err;
} else {
ar->chan_fail = 0;
}
if (ar->heavy_clip) {
err = carl9170_write_reg(ar, AR9170_PHY_REG_HEAVY_CLIP_ENABLE,
0x200 | ar->heavy_clip);
if (err) {
			if (net_ratelimit()) {
				wiphy_err(ar->hw->wiphy,
					  "failed to set heavy clip\n");
			}
}
return err;
}
}
ar->channel = channel;
ar->ht_settings = new_ht;
return 0;
}
|
linux-master
|
drivers/net/wireless/ath/carl9170/phy.c
|